From: Moritz Mühlenhoff
Date: Mon, 24 Jul 2023 09:57:34 +0000 (+0100)
Subject: openjdk-17 (17.0.8+7-1~deb12u1) bookworm-security; urgency=medium
X-Git-Tag: archive/raspbian/17.0.8+7-1_deb12u1+rpi1^2~37
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=4f695265cfb526e72e77f1bd2441ab3e43f03374;p=openjdk-17.git

openjdk-17 (17.0.8+7-1~deb12u1) bookworm-security; urgency=medium

* Rebuild for bookworm

[dgit import unpatched openjdk-17 17.0.8+7-1~deb12u1]
---

4f695265cfb526e72e77f1bd2441ab3e43f03374
diff --cc debian/JAVA_HOME
index 000000000,000000000..876e98953
new file mode 100644
--- /dev/null
+++ b/debian/JAVA_HOME
@@@ -1,0 -1,0 +1,13 @@@
++ JAVA_HOME
++
++1. Legacy use of JAVA_HOME
++
++   With current versions of java-common there should be no need to set
++   JAVA_HOME; Java(TM) is fully integrated with your Debian GNU/Linux
++   system.
++
++
++2. Selecting OpenJDK 17 as the default Java(TM) implementation
++
++   Please see README.alternatives for instructions on how to make the
++   OpenJDK 17 executables and man pages the default on your system.
diff --cc debian/JB-archive.applications.in
index 000000000,000000000..7f20fd585
new file mode 100644
--- /dev/null
+++ b/debian/JB-archive.applications.in
@@@ -1,0 -1,0 +1,6 @@@
++java-archive
++ command=/usr/bin/java -jar
++ name=java-archive
++ can_open_multiple_files=false
++ requires_terminal=false
++ mime_types=application/java-archive,application/x-java-archive,application/x-jar
diff --cc debian/JB-archive.keys.in
index 000000000,000000000..743e3c8a2
new file mode 100644
--- /dev/null
+++ b/debian/JB-archive.keys.in
@@@ -1,0 -1,0 +1,18 @@@
++application/java-archive
++ description=Java Archive
++ icon_filename=@basename@
++ default_action_type=application
++ default_application_id=java-archive
++ short_list_application_user_additions=java-archive
++application/x-java-archive
++ description=Java Archive
++ icon_filename=@basename@
++ default_action_type=application
++ default_application_id=java-archive
++ short_list_application_user_additions=java-archive
++application/x-jar
++ description=Java Archive
++ icon_filename=@basename@
++ default_action_type=application
++ default_application_id=java-archive
++ short_list_application_user_additions=java-archive
diff --cc debian/JB-archive.mime.in
index 000000000,000000000..e354ead53
new file mode 100644
--- /dev/null
+++ b/debian/JB-archive.mime.in
@@@ -1,0 -1,0 +1,6 @@@
++application/java-archive
++ ext: jar
++application/x-java-archive
++ ext: jar
++application/x-jar
++ ext: jar
diff --cc debian/JB-dbg.overrides.in
index 000000000,000000000..f177b8d14
new file mode 100644
--- /dev/null
+++ b/debian/JB-dbg.overrides.in
@@@ -1,0 -1,0 +1,7 @@@
++# these are pre-stripped binaries (fastdebug)
++@basename@-dbg binary: library-in-debug-or-profile-should-not-be-stripped
++@basename@-dbg binary: debug-file-with-no-debug-symbols
++
++# these are java's .debuginfo files ...
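++# (they contain only debugging sections and no loadable code, so lintian's
++# dynamic-table and static-linking heuristics misfire on them)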
++@basename@-dbg binary: binary-with-bad-dynamic-table ++@basename@-dbg binary: statically-linked-binary diff --cc debian/JB-demo.menu.in index 000000000,000000000..58aec29c3 new file mode 100644 --- /dev/null +++ b/debian/JB-demo.menu.in @@@ -1,0 -1,0 +1,55 @@@ ++?package(@basename@-demo):\ ++ needs="x11"\ ++ section="Applications/Programming"\ ++ title="@vendor@ Java @RELEASE@ FileChooser Demo"\ ++ command="/bin/sh -c 'cd /@basedir@/demo/jfc/FileChooserDemo; /@basedir@/bin/java -jar FileChooserDemo.jar'"\ ++ icon="/usr/share/pixmaps/@basename@-app.xpm"\ ++ hints="Java2" ++ ++?package(@basename@-demo):\ ++ needs="x11"\ ++ section="Applications/Programming"\ ++ title="@vendor@ Java @RELEASE@ Font2DTest Demo"\ ++ command="/bin/sh -c 'cd /@basedir@/demo/jfc/Font2DTest; /@basedir@/bin/java -jar Font2DTest.jar'"\ ++ icon="/usr/share/pixmaps/@basename@-app.xpm"\ ++ hints="Java2" ++ ++?package(@basename@-demo):\ ++ needs="x11"\ ++ section="Applications/Programming"\ ++ title="@vendor@ Java @RELEASE@ Java 2D Demo"\ ++ command="/bin/sh -c 'cd /@basedir@/demo/jfc/Java2D; /@basedir@/bin/java -jar Java2Demo.jar'"\ ++ icon="/usr/share/pixmaps/@basename@-app.xpm"\ ++ hints="Java2" ++ ++?package(@basename@-demo):\ ++ needs="x11"\ ++ section="Applications/Programming"\ ++ title="@vendor@ Java @RELEASE@ SampleTree Demo"\ ++ command="/bin/sh -c 'cd /@basedir@/demo/jfc/SampleTree; /@basedir@/bin/java -jar SampleTree.jar'"\ ++ icon="/usr/share/pixmaps/@basename@-app.xpm"\ ++ hints="Java2" ++ ++?package(@basename@-demo):\ ++ needs="x11"\ ++ section="Applications/Programming"\ ++ title="@vendor@ Java @RELEASE@ Stylepad Demo"\ ++ command="/bin/sh -c 'cd /@basedir@/demo/jfc/Stylepad; /@basedir@/bin/java -jar Stylepad.jar'"\ ++ icon="/usr/share/pixmaps/@basename@-app.xpm"\ ++ hints="Java2" ++ ++?package(@basename@-demo):\ ++ needs="x11"\ ++ section="Applications/Programming"\ ++ title="@vendor@ Java @RELEASE@ SwingSet2 Demo"\ ++ command="/bin/sh -c 'cd /@basedir@/demo/jfc/SwingSet2; /@basedir@/bin/java -jar SwingSet2.jar'"\ ++ icon="/usr/share/pixmaps/@basename@-app.xpm"\ ++ hints="Java2" ++ ++?package(@basename@-demo):\ ++ needs="x11"\ ++ section="Applications/Programming"\ ++ title="@vendor@ Java @RELEASE@ TableExample Demo"\ ++ command="/bin/sh -c 'cd /@basedir@/demo/jfc/TableExample; /@basedir@/bin/java -jar TableExample.jar'"\ ++ icon="/usr/share/pixmaps/@basename@-app.xpm"\ ++ hints="Java2" diff --cc debian/JB-demo.overrides.in index 000000000,000000000..a6454ded8 new file mode 100644 --- /dev/null +++ b/debian/JB-demo.overrides.in @@@ -1,0 -1,0 +1,11 @@@ ++# jpda/examples.jar only contains source code ++@basename@-demo binary: codeless-jar usr/share/doc/openjdk-7-jre-headless/demo/jpda/examples.jar ++ ++# OpenJDK is correct despite any Debian policy ++@basename@-demo binary: unknown-java-class-version ++ ++# example jar file with sources and documentation ++@basename@-demo binary: codeless-jar ++ ++# example jar file with sources ++@basename@-demo binary: jar-contains-source diff --cc debian/JB-doc.doc-base.in index 000000000,000000000..619d9ae5d new file mode 100644 --- /dev/null +++ b/debian/JB-doc.doc-base.in @@@ -1,0 -1,0 +1,11 @@@ ++Document: @JDK@-doc ++Title: @vendor@ Java(TM) @RELEASE@ JDK, Standard Edition Documentation ++Author: @vendor@ ++Abstract: The Java @RELEASE@ Platform Documentation contains API specifications, ++ feature descriptions, developer guides, reference pages for JDK tools ++ and utilities, demos, and links to related information. 
++Section: Programming ++ ++Format: HTML ++Index: /usr/share/doc/@basename@-jre-headless/api/index.html ++Files: /usr/share/doc/@basename@-jre-headless/api/*/*.html diff --cc debian/JB-fonts.defoma-hints.in index 000000000,000000000..80212e25b new file mode 100644 --- /dev/null +++ b/debian/JB-fonts.defoma-hints.in @@@ -1,0 -1,0 +1,187 @@@ ++category truetype ++ ++# Sans ++ ++begin /usr/share/fonts/truetype/ttf-lucida/LucidaSansRegular.ttf ++ Family = LucidaSans ++ FontName = LucidaSans ++ Encoding = Unicode ++ Location = English ++ Charset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ISO10646-1 ++ UniCharset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ++ GeneralFamily = SansSerif ++ Weight = Book ++ Width = Variable ++ Shape = NoSerif Upright ++ Foundry = Lucida ++ Priority = 15 ++end ++ ++begin /usr/share/fonts/truetype/ttf-lucida/LucidaSansDemiBold.ttf ++ Family = LucidaSans ++ FontName = LucidaSans-Demibold ++ Encoding = Unicode ++ Location = English ++ Charset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ISO10646-1 ++ UniCharset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ++ GeneralFamily = SansSerif ++ Weight = Bold ++ Width = Variable ++ Shape = NoSerif Upright ++ Foundry = Lucida ++ Priority = 15 ++end ++ ++begin /usr/share/fonts/truetype/ttf-lucida/LucidaSansOblique.ttf ++ Family = LucidaSans ++ FontName = LucidaSans-Oblique ++ Encoding = Unicode ++ Location = English ++ Charset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ISO10646-1 ++ UniCharset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ++ GeneralFamily = SansSerif ++ Weight = Thin ++ Width = Variable ++ Shape = NoSerif Oblique ++ Foundry = Lucida ++ Priority = 15 ++end ++ ++begin /usr/share/fonts/truetype/ttf-lucida/LucidaSansDemiOblique.ttf ++ Family = LucidaSans ++ FontName = LucidaSans-Demibold-Oblique ++ Encoding = Unicode ++ Location = English ++ Charset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ISO10646-1 ++ UniCharset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ++ GeneralFamily = SansSerif ++ Weight = Bold ++ Width = Variable ++ Shape = NoSerif Oblique ++ Foundry = Lucida ++ Priority = 15 ++end ++ ++# Serif ++ ++begin /usr/share/fonts/truetype/ttf-lucida/LucidaBrightRegular.ttf ++ Family = LucidaBright ++ FontName = LucidaBright-Regular ++ Encoding = Unicode ++ Location = English ++ Charset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ISO10646-1 ++ UniCharset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ++ GeneralFamily = Roman ++ Weight = Book ++ Width = Variable ++ Shape = NoSerif Upright ++ Foundry = Lucida ++ Priority = 15 ++end ++ ++begin /usr/share/fonts/truetype/ttf-lucida/LucidaBrightItalic.ttf ++ Family = LucidaBright ++ FontName = LucidaBright-Italic ++ Encoding = Unicode ++ Location = English ++ Charset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ISO10646-1 ++ UniCharset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ++ GeneralFamily = Roman ++ Weight = Book ++ Width = Variable ++ Shape = NoSerif Oblique ++ Foundry = Lucida ++ Priority = 15 ++end ++ ++begin /usr/share/fonts/truetype/ttf-lucida/LucidaBrightDemiBold.ttf ++ Family = Lucida-Bright ++ FontName = Lucida-Bright-Demi ++ Encoding = Unicode ++ Location = English ++ Charset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ISO10646-1 ++ UniCharset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ++ GeneralFamily = Roman ++ Weight = Bold ++ Width = Variable ++ Shape = NoSerif Upright ++ Foundry = Lucida ++ Priority = 15 ++end ++ ++begin 
/usr/share/fonts/truetype/ttf-lucida/LucidaBrightDemiItalic.ttf ++ Family = Lucida-Bright ++ FontName = Lucida-Bright-DemiItalic ++ Encoding = Unicode ++ Location = English ++ Charset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ISO10646-1 ++ UniCharset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ++ GeneralFamily = Roman ++ Weight = Demi ++ Width = Variable ++ Shape = NoSerif Oblique ++ Foundry = Lucida ++ Priority = 15 ++end ++ ++# Mono ++ ++begin /usr/share/fonts/truetype/ttf-lucida/LucidaTypewriterRegular.ttf ++ Family = LucidaSans-Typewriter ++ FontName = LucidaSans-Typewriter ++ Encoding = Unicode ++ Location = English ++ Charset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ISO10646-1 ++ UniCharset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ++ GeneralFamily = Typewriter ++ Weight = Book ++ Width = Fixed ++ Shape = NoSerif Upright ++ Foundry = Lucida ++ Priority = 15 ++end ++ ++begin /usr/share/fonts/truetype/ttf-lucida/LucidaTypewriterOblique.ttf ++ Family = LucidaSans-Typewriter ++ FontName = LucidaSans-Typewriter-Oblique ++ Encoding = Unicode ++ Location = English ++ Charset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ISO10646-1 ++ UniCharset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ++ GeneralFamily = Typewriter ++ Weight = Book ++ Width = Fixed ++ Shape = NoSerif Oblique ++ Foundry = Lucida ++ Priority = 15 ++end ++ ++begin /usr/share/fonts/truetype/ttf-lucida/LucidaTypewriterBold.ttf ++ Family = LucidaSans-Typewriter ++ FontName = LucidaSans-Typewriter-Bold ++ Encoding = Unicode ++ Location = English ++ Charset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ISO10646-1 ++ UniCharset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ++ GeneralFamily = Typewriter ++ Weight = Bold ++ Width = Fixed ++ Shape = NoSerif Upright ++ Foundry = Lucida ++ Priority = 15 ++end ++ ++begin /usr/share/fonts/truetype/ttf-lucida/LucidaTypewriterBoldOblique.ttf ++ Family = LucidaSans-Typewriter ++ FontName = LucidaSans-Typewriter-Bold-Oblique ++ Encoding = Unicode ++ Location = English ++ Charset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ISO10646-1 ++ UniCharset = ISO8859-1 ISO8859-2 ISO8859-7 ISO8859-9 ISO8859-15 ++ GeneralFamily = Typewriter ++ Weight = Bold ++ Width = Fixed ++ Shape = NoSerif Oblique ++ Foundry = Lucida ++ Priority = 15 ++end diff --cc debian/JB-java.desktop.in index 000000000,000000000..eec5e5ca4 new file mode 100644 --- /dev/null +++ b/debian/JB-java.desktop.in @@@ -1,0 -1,0 +1,12 @@@ ++[Desktop Entry] ++Name=@vendor@ Java @RELEASE@ Runtime ++Name[fi]=@vendor@ Java @RELEASE@ - ajonaikainen ympäristö ++Comment=@vendor@ Java @RELEASE@ Runtime ++Comment[fi]=@vendor@ Java @RELEASE@ - ajonaikainen ympäristö ++Keywords=java;runtime ++Exec=@java_launcher@ -jar ++Terminal=false ++Type=Application ++Icon=@basename@ ++MimeType=application/x-java-archive;application/java-archive;application/x-jar; ++NoDisplay=true diff --cc debian/JB-jconsole.desktop.in index 000000000,000000000..8a87820a2 new file mode 100644 --- /dev/null +++ b/debian/JB-jconsole.desktop.in @@@ -1,0 -1,0 +1,11 @@@ ++[Desktop Entry] ++Name=@vendor@ Java @RELEASE@ Console ++Name[fi]=@vendor@ Java @RELEASE@ - konsoli ++Comment=@vendor@ Java @RELEASE@ Monitoring & Management Console ++Comment[fi]=@vendor@ Java @RELEASE@ - valvonta- ja hallintakonsoli ++Keywords=java;console;monitoring ++Exec=/usr/bin/jconsole ++Terminal=false ++Type=Application ++Icon=@basename@ ++Categories=Application;System; diff --cc debian/JB-jdk-headless.postinst.in index 
000000000,000000000..0e1ef5feb
new file mode 100644
--- /dev/null
+++ b/debian/JB-jdk-headless.postinst.in
@@@ -1,0 -1,0 +1,75 @@@
++#!/bin/sh
++
++set -e
++
++multiarch=@multiarch@
++priority=@priority@
++basedir=/@basedir@
++mandir=$basedir/man
++jdiralias=@jdiralias@
++srcext=1.gz
++dstext=1.gz
++jdk_hl_tools='@jdk_hl_tools@'
++
++case "$1" in
++configure)
++    # obsolete tool
++    if update-alternatives --list apt 2>/dev/null; then
++        update-alternatives --remove-all apt || true
++    fi
++
++    if [ -z "$2" ]; then
++        update_alternatives=y
++    fi
++    if [ -n "$multiarch" ] && [ -n "$2" ]; then
++        for i in $jdk_hl_tools; do
++            if [ -z "$(update-alternatives --list $i 2>/dev/null | grep ^$basedir/)" ]; then
++                update_alternatives=y
++                break
++            fi
++        done
++    fi
++    if [ "$update_alternatives" != y ] && [ $priority -gt 1060 ]; then
++        for i in $jdk_hl_tools; do
++            oldp=$(update-alternatives --query java | awk -v b=$basedir '/^Alternative:/ && $2~b {p=1} /^Priority:/ && p {print $2; exit}')
++            if [ -n "$oldp" ] && [ "$oldp" -le 1060 ]; then
++                update_alternatives=y
++                break
++            fi
++        done
++    fi
++
++    if [ "$update_alternatives" = y ]; then
++        if [ -n "$multiarch" ] && [ "$DPKG_MAINTSCRIPT_ARCH" != $(dpkg --print-architecture) ]; then
++            priority=$(expr $priority - 1)
++        fi
++        for i in $jdk_hl_tools; do
++            unset slave1 slave2 || true
++            if [ -e $mandir/man1/$i.$srcext ]; then
++                slave1="--slave \
++                    /usr/share/man/man1/$i.$dstext \
++                    $i.$dstext \
++                    $mandir/man1/$i.$srcext"
++            fi
++            if false && [ -e $mandir/ja/man1/$i.$srcext ]; then
++                slave2="--slave \
++                    /usr/share/man/ja/man1/$i.$dstext \
++                    ${i}_ja.$dstext \
++                    $mandir/ja/man1/$i.$srcext"
++            fi
++            update-alternatives \
++                --install \
++                /usr/bin/$i \
++                $i \
++                $basedir/bin/$i \
++                $priority \
++                $slave1 $slave2
++        done
++    fi # update alternatives
++
++    ;;
++esac
++
++#DEBHELPER#
++
++exit 0
diff --cc debian/JB-jdk-headless.preinst.in
index 000000000,000000000..01698fc26
new file mode 100644
--- /dev/null
+++ b/debian/JB-jdk-headless.preinst.in
@@@ -1,0 -1,0 +1,23 @@@
++#!/bin/sh
++
++set -e
++
++multiarch=@multiarch@
++old_basedir=/usr/lib/jvm/java-7-openjdk
++jdk_hl_tools='apt extcheck idlj jar jarsigner javac javadoc javah javap jdb jhat jinfo jmap jps jrunscript jsadebugd jstack jstat jstatd native2ascii rmic schemagen serialver wsgen wsimport xjc'
++
++case "$1" in
++    upgrade)
++        if [ -n "$multiarch" ] && [ -n "$2" ]; then
++            for i in $jdk_hl_tools; do
++                if [ -n "$(update-alternatives --list $i 2>/dev/null | grep ^$old_basedir/)" ]; then
++                    update-alternatives --remove $i $old_basedir/bin/$i || true
++                fi
++            done
++        fi
++        ;;
++esac
++
++#DEBHELPER#
++
++exit 0
diff --cc debian/JB-jdk-headless.prerm.in
index 000000000,000000000..80bf5030a
new file mode 100644
--- /dev/null
+++ b/debian/JB-jdk-headless.prerm.in
@@@ -1,0 -1,0 +1,15 @@@
++#!/bin/sh -e
++
++set -e
++
++jdk_hl_tools='@jdk_hl_tools@'
++basedir=/@basedir@
++
++
++if [ "$1" = "remove" ] || [ "$1" = "deconfigure" ]; then
++    for i in $jdk_hl_tools; do
++        update-alternatives --remove $i $basedir/bin/$i
++    done
++fi
++
++#DEBHELPER#
diff --cc debian/JB-jdk.menu.in
index 000000000,000000000..14f11ec2d
new file mode 100644
--- /dev/null
+++ b/debian/JB-jdk.menu.in
@@@ -1,0 -1,0 +1,7 @@@
++?package(@basename@-jdk):\
++ needs="x11"\
++ section="Applications/System/Administration"\
++ title="@vendor@ Java @RELEASE@ Console"\
++ command="/usr/bin/jconsole"\
++ icon="/usr/share/pixmaps/@basename@.xpm"\
++ hints="Java2"
diff --cc debian/JB-jdk.postinst.in
index 000000000,000000000..bdffbe3cc
new file mode 100644
--- /dev/null
+++ b/debian/JB-jdk.postinst.in
@@@ -1,0 -1,0 +1,75 @@@
++#!/bin/sh
++
++set -e
++
++multiarch=@multiarch@
++priority=@priority@
++basedir=/@basedir@
++mandir=$basedir/man
++jdiralias=@jdiralias@
++srcext=1.gz
++dstext=1.gz
++jdk_tools='@jdk_tools@'
++
++case "$1" in
++configure)
++    # obsolete tool
++    if update-alternatives --list apt 2>/dev/null; then
++        update-alternatives --remove-all apt || true
++    fi
++
++    if [ -z "$2" ]; then
++        update_alternatives=y
++    fi
++    if [ -n "$multiarch" ] && [ -n "$2" ]; then
++        for i in $jdk_tools; do
++            if [ -z "$(update-alternatives --list $i 2>/dev/null | grep ^$basedir/)" ]; then
++                update_alternatives=y
++                break
++            fi
++        done
++    fi
++    if [ "$update_alternatives" != y ] && [ $priority -gt 1060 ]; then
++        for i in $jdk_tools; do
++            oldp=$(update-alternatives --query java | awk -v b=$basedir '/^Alternative:/ && $2~b {p=1} /^Priority:/ && p {print $2; exit}')
++            if [ -n "$oldp" ] && [ "$oldp" -le 1060 ]; then
++                update_alternatives=y
++                break
++            fi
++        done
++    fi
++
++    if [ "$update_alternatives" = y ]; then
++        if [ -n "$multiarch" ] && [ "$DPKG_MAINTSCRIPT_ARCH" != $(dpkg --print-architecture) ]; then
++            priority=$(expr $priority - 1)
++        fi
++        for i in $jdk_tools; do
++            unset slave1 slave2 || true
++            if [ -e $mandir/man1/$i.$srcext ]; then
++                slave1="--slave \
++                    /usr/share/man/man1/$i.$dstext \
++                    $i.$dstext \
++                    $mandir/man1/$i.$srcext"
++            fi
++            if false && [ -e $mandir/ja/man1/$i.$srcext ]; then
++                slave2="--slave \
++                    /usr/share/man/ja/man1/$i.$dstext \
++                    ${i}_ja.$dstext \
++                    $mandir/ja/man1/$i.$srcext"
++            fi
++            update-alternatives \
++                --install \
++                /usr/bin/$i \
++                $i \
++                $basedir/bin/$i \
++                $priority \
++                $slave1 $slave2
++        done
++    fi # update alternatives
++
++    ;;
++esac
++
++#DEBHELPER#
++
++exit 0
diff --cc debian/JB-jdk.prerm.in
index 000000000,000000000..f51219574
new file mode 100644
--- /dev/null
+++ b/debian/JB-jdk.prerm.in
@@@ -1,0 -1,0 +1,15 @@@
++#!/bin/sh -e
++
++set -e
++
++jdk_tools='@jdk_tools@'
++basedir=/@basedir@
++
++
++if [ "$1" = "remove" ] || [ "$1" = "deconfigure" ]; then
++    for i in $jdk_tools; do
++        update-alternatives --remove $i $basedir/bin/$i
++    done
++fi
++
++#DEBHELPER#
diff --cc debian/JB-jre-headless.overrides.in
index 000000000,000000000..373a24206
new file mode 100644
--- /dev/null
+++ b/debian/JB-jre-headless.overrides.in
@@@ -1,0 -1,0 +1,11 @@@
++# empty directory by intent
++@basename@-jre-headless binary: package-contains-empty-directory usr/share/binfmts/
++
++# Strip libjvm.so with --strip-debug instead of --strip-unneeded. LP: #574997.
++@basename@-jre-headless binary: unstripped-binary-or-object
++
++# Ignore the mouse cursors
++@basename@-jre-headless binary: image-file-in-usr-lib
++
++# Just an empty directory
++@basename@-jre-headless binary: debug-package-should-be-named-dbg
diff --cc debian/JB-jre-headless.postinst.in
index 000000000,000000000..b764c4742
new file mode 100644
--- /dev/null
+++ b/debian/JB-jre-headless.postinst.in
@@@ -1,0 -1,0 +1,106 @@@
++#!/bin/sh
++
++set -e
++
++multiarch=@multiarch@
++priority=@priority@
++basedir=/@basedir@
++mandir=$basedir/man
++srcext=1.gz
++dstext=1.gz
++jre_tools='@jre_hl_tools@'
++
++case "$1" in
++configure)
++    # fail early. java currently uses tricks to find its own shared
++    # libraries depending on the path of the binary. Will be changed
++    # in OpenJDK7
++    if ! mountpoint -q /proc; then
++        echo >&2 "the java command requires a mounted proc fs (/proc)."
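++        # exiting non-zero leaves the package unconfigured; after mounting
++        # /proc, "dpkg --configure -a" completes the installation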
++ exit 1 ++ fi ++ ++ [ -d /etc/.java ] || mkdir -m 755 /etc/.java ++ [ -d /etc/.java/.systemPrefs ] || mkdir -m 755 /etc/.java/.systemPrefs ++ if [ ! -f /etc/.java/.systemPrefs/.system.lock ]; then ++ touch /etc/.java/.systemPrefs/.system.lock ++ chmod 644 /etc/.java/.systemPrefs/.system.lock ++ fi ++ if [ ! -f /etc/.java/.systemPrefs/.systemRootModFile ]; then ++ touch /etc/.java/.systemPrefs/.systemRootModFile ++ chmod 644 /etc/.java/.systemPrefs/.systemRootModFile ++ fi ++ ++ if [ -z "$2" ]; then ++ update_alternatives=y ++ fi ++ if [ -n "$multiarch" ] && [ -n "$2" ]; then ++ for i in $jre_tools; do ++ if [ -z "$(update-alternatives --list $i 2>/dev/null | grep ^$basedir/)" ]; then ++ update_alternatives=y ++ break ++ fi ++ done ++ fi ++ if [ "$update_alternatives" != y ] && [ $priority -gt 1060 ]; then ++ for i in $jre_tools; do ++ oldp=$(update-alternatives --query java | awk -v b=$basedir '/^Alternative:/ && $2~b {p=1} /^Priority:/ && p {print $2; exit}') ++ if [ -n "$oldp" ] && [ "$oldp" -le 1060 ]; then ++ update_alternatives=y ++ break ++ fi ++ done ++ fi ++ ++ if [ "$update_alternatives" = y ]; then ++ if [ -n "$multiarch" ] && [ "$DPKG_MAINTSCRIPT_ARCH" != $(dpkg --print-architecture) ]; then ++ priority=$(expr $priority - 1) ++ fi ++ for i in $jre_tools; do ++ unset slave1 slave2 || true ++ if [ -e $mandir/man1/$i.$srcext ]; then ++ slave1="--slave \ ++ /usr/share/man/man1/$i.$dstext \ ++ $i.$dstext \ ++ $mandir/man1/$i.$srcext" ++ fi ++ # disabled ++ if false && [ -e $mandir/ja/man1/$i.$srcext ]; then ++ slave2="--slave \ ++ /usr/share/man/ja/man1/$i.$dstext \ ++ ${i}_ja.$dstext \ ++ $mandir/ja/man1/$i.$srcext" ++ fi ++ update-alternatives \ ++ --install \ ++ /usr/bin/$i \ ++ $i \ ++ $basedir/bin/$i \ ++ $priority \ ++ $slave1 $slave2 ++ done ++ update-alternatives \ ++ --install /usr/bin/jexec jexec $basedir/lib/jexec $priority \ ++ --slave \ ++ /usr/share/binfmts/jar \ ++ jexec-binfmt \ ++ $basedir/lib/jar.binfmt ++ fi # update alternatives ++ ++ # register binfmt; ignore errors, the alternative may already be ++ # registered by another JRE. ++ if which update-binfmts >/dev/null && [ -r /usr/share/binfmts/jar ]; then ++ update-binfmts --package @basename@ --import jar || true ++ fi ++ ++ # Now that java is fully registered and configured, ++ # call update-ca-certificates-java ++ dpkg-trigger update-ca-certificates-java ++ ++ ;; ++ ++esac ++ ++#DEBHELPER# ++ ++exit 0 diff --cc debian/JB-jre-headless.postrm.in index 000000000,000000000..e605e9633 new file mode 100644 --- /dev/null +++ b/debian/JB-jre-headless.postrm.in @@@ -1,0 -1,0 +1,24 @@@ ++#!/bin/sh ++ ++set -e ++ ++basedir=/@basedir@ ++multiarch=@multiarch@ ++jdirname=@jdirname@ ++etcdir=/@etcdir@ ++ ++case "$1" in ++purge) ++ if [ -z "$jdirname" ] || [ -z "$etcdir" ]; then ++ echo >&2 "$(basename $0): Internal error" ++ exit 1 ++ fi ++ rm -f $basedir/lib/*/classes.jsa ++ ++ # removals of config files in /etc is handled by dpkg ++ ++ # XXX should remove /etc/.java ??? 
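++    # note: /etc/.java holds system preferences shared by every installed
++    # JRE, so purging a single package should probably not delete it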
++ ;; ++esac ++ ++#DEBHELPER# diff --cc debian/JB-jre-headless.prerm.in index 000000000,000000000..89e7799d2 new file mode 100644 --- /dev/null +++ b/debian/JB-jre-headless.prerm.in @@@ -1,0 -1,0 +1,24 @@@ ++#!/bin/sh ++ ++set -e ++ ++basedir=/@basedir@ ++jre_tools='@jre_hl_tools@' ++ ++if [ "$1" = "remove" ] || [ "$1" = "deconfigure" ]; then ++ for i in $jre_tools; do ++ update-alternatives --remove $i $basedir/bin/$i ++ done ++ ++ if which update-binfmts >/dev/null; then ++ # try to remove and ignore the error ++ if [ -e /var/lib/binfmts/@basename@ ]; then ++ update-binfmts --package @basename@ \ ++ --remove jar /usr/bin/jexec || true ++ fi ++ fi ++ ++ update-alternatives --remove jexec $basedir/lib/jexec ++fi ++ ++#DEBHELPER# diff --cc debian/JB-jre-zero.overrides.in index 000000000,000000000..1616f866a new file mode 100644 --- /dev/null +++ b/debian/JB-jre-zero.overrides.in @@@ -1,0 -1,0 +1,2 @@@ ++# Strip libjvm.so with --strip-debug instead of --strip-unneeded. LP: #574997. ++@basename@-jre-zero binary: unstripped-binary-or-object diff --cc debian/JB-jre.menu.in index 000000000,000000000..024b63fcf new file mode 100644 --- /dev/null +++ b/debian/JB-jre.menu.in @@@ -1,0 -1,0 +1,7 @@@ ++?package(@basename@-jre):\ ++ needs="x11"\ ++ section="Applications/System/Administration"\ ++ title="@vendor@ Java @RELEASE@ Policy Tool"\ ++ command="/usr/bin/policytool"\ ++ icon="/usr/share/pixmaps/@basename@.xpm"\ ++ hints="Java2" diff --cc debian/JB-jre.overrides.in index 000000000,000000000..679d44c49 new file mode 100644 --- /dev/null +++ b/debian/JB-jre.overrides.in @@@ -1,0 -1,0 +1,6 @@@ ++# these are in the -jre-headless package, -jre depends on it ++@basename@-jre binary: desktop-command-not-in-package ++ ++# LP: #2012326 - in order to print allocation locations, ++# the libraries need to retain symbols ++@basename@-jre binary: unstripped-binary-or-object diff --cc debian/JB-jre.postinst.in index 000000000,000000000..84edc96f7 new file mode 100644 --- /dev/null +++ b/debian/JB-jre.postinst.in @@@ -1,0 -1,0 +1,71 @@@ ++#!/bin/sh ++ ++set -e ++ ++multiarch=@multiarch@ ++priority=@priority@ ++basedir=/@basedir@ ++mandir=$basedir/man ++srcext=1.gz ++dstext=1.gz ++jre_tools='@jre_tools@' ++ ++case "$1" in ++configure) ++ if [ -z "$2" ]; then ++ update_alternatives=y ++ fi ++ if [ -n "$multiarch" ] && [ -n "$2" ]; then ++ for i in $jre_tools; do ++ if [ -z "$(update-alternatives --list $i 2>/dev/null | grep ^$basedir/)" ]; then ++ update_alternatives=y ++ break ++ fi ++ done ++ fi ++ if [ "$update_alternatives" != y ] && [ $priority -gt 1060 ]; then ++ for i in $jre_tools; do ++ oldp=$(update-alternatives --query java | awk -v b=$basedir '/^Alternative:/ && $2~b {p=1} /^Priority:/ && p {print $2; exit}') ++ if [ -n "$oldp" ] && [ "$oldp" -le 1060 ]; then ++ update_alternatives=y ++ break ++ fi ++ done ++ fi ++ ++ if [ "$update_alternatives" = y ]; then ++ if [ -n "$multiarch" ] && [ "$DPKG_MAINTSCRIPT_ARCH" != $(dpkg --print-architecture) ]; then ++ priority=$(expr $priority - 1) ++ fi ++ for i in $jre_tools; do ++ unset slave1 slave2 || true ++ if [ -e $mandir/man1/$i.$srcext ]; then ++ slave1="--slave \ ++ /usr/share/man/man1/$i.$dstext \ ++ $i.$dstext \ ++ $mandir/man1/$i.$srcext" ++ fi ++ # disabled ++ if false && [ -e $mandir/ja/man1/$i.$srcext ]; then ++ slave2="--slave \ ++ /usr/share/man/ja/man1/$i.$dstext \ ++ ${i}_ja.$dstext \ ++ $mandir/ja/man1/$i.$srcext" ++ fi ++ update-alternatives \ ++ --install \ ++ /usr/bin/$i \ ++ $i \ ++ $basedir/bin/$i \ ++ $priority \ ++ $slave1 $slave2 ++ 
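++            # (above: $slave1/$slave2 are left unquoted on purpose; each
++            # must word-split into a complete "--slave link name path" group)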
++            done
++    fi # update alternatives
++
++    ;;
++
++esac
++
++#DEBHELPER#
++
++exit 0
diff --cc debian/JB-jre.prerm.in
index 000000000,000000000..55cd79052
new file mode 100644
--- /dev/null
+++ b/debian/JB-jre.prerm.in
@@@ -1,0 -1,0 +1,14 @@@
++#!/bin/sh
++
++set -e
++
++basedir=/@basedir@
++jre_tools='@jre_tools@'
++
++if [ "$1" = "remove" ] || [ "$1" = "deconfigure" ]; then
++    for i in $jre_tools; do
++        update-alternatives --remove $i $basedir/bin/$i
++    done
++fi
++
++#DEBHELPER#
diff --cc debian/README.Debian
index 000000000,000000000..1d1e452c9
new file mode 100644
--- /dev/null
+++ b/debian/README.Debian
@@@ -1,0 -1,0 +1,65 @@@
++openjdk-17 for Debian/Ubuntu
++----------------------------
++
++The OpenJDK build is configured --with-additional-vms to build with
++different virtual machines. The original implementation of the hotspot
++VM is only available on the amd64, i386, lpia and sparc architectures.
++Other VMs: Zero, which provides a byte code interpreter for every
++architecture.
++
++To use a VM other than the default, use
++
++    java -zero
++
++or for the java tools, use
++
++    -J-zero.
++
++To change the default permanently, edit /etc/java-17-openjdk/jvm.cfg.
++
++The Zero VM can be found in the openjdk-17-jre-zero package (on the
++architectures where the Hotspot VM is available).
++
++Please look for further documentation in the directory
++/usr/share/doc/openjdk-17-jre/ .
++
++The package openjdk-17-jre-headless ships a cgi script
++/usr/lib/jvm/java-17-openjdk/bin/java-rmi.cgi that you must integrate
++into your webserver setup manually if you need it. It is not activated
++automatically.
++
++
++Note for non-reparenting window manager users
++---------------------------------------------
++
++If you are using a non-reparenting window manager, such as ratpoison, awesome
++or dwm, some Java graphical applications using the AWT toolkit will only
++display empty grey windows, as described in bug #508650.
++
++There are two solutions to work around this issue:
++1. mask your window manager as one of the non-reparenting ones supported
++   by AWT, using the wmname tool from the suckless-tools package:
++   $ wmname LG3D
++2. set the environment variable _JAVA_AWT_WM_NONREPARENTING:
++   $ export _JAVA_AWT_WM_NONREPARENTING=true
++
++You can automate these tasks by writing them to your ~/.xsessionrc:
++$ cat >> ~/.xsessionrc <<EOF
++export _JAVA_AWT_WM_NONREPARENTING=true
++EOF
++
++ -- Matthias Klose  Sun, 03 May 2009 13:58:10 +0200
++ -- Torsten Werner  Wed, 02 Apr 2008 11:46:53 +0200
diff --cc debian/README.alternatives.in
index 000000000,000000000..19d46fb31
new file mode 100644
--- /dev/null
+++ b/debian/README.alternatives.in
@@@ -1,0 -1,0 +1,35 @@@
++Updating alternatives for JRE/JDK installations
++-----------------------------------------------
++
++Packages providing a runtime or a development environment for the Java
++language all use symlinks in the /etc/alternatives directory to enable
++the system administrator to choose which programs to use for java,
++javac, javah, etc.
++
++The update-java-alternatives script can be used to set all the JRE/JDK
++alternatives:
++
++First, you have to decide which Java implementation to default to.
++
++    update-java-alternatives --list
++
++tells about the available runtime and/or development environments. The
++first column shows the names to be used in the following examples.
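++
++For example, with two OpenJDK releases installed, the listing may look
++like this (illustrative output only; names, priorities and paths depend
++on the packages actually installed):
++
++    java-1.11.0-openjdk-amd64 1111 /usr/lib/jvm/java-1.11.0-openjdk-amd64
++    java-1.17.0-openjdk-amd64 1711 /usr/lib/jvm/java-1.17.0-openjdk-amd64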
++
++- Set all runtime tools to point to the <jname> alternatives:
++
++    update-java-alternatives --jre --set <jname>
++
++- Set all runtime tools (headless only) to point to the <jname>
++  alternatives:
++
++    update-java-alternatives --jre-headless --set <jname>
++
++- Set all runtime and development tools to point to the <jname>
++  alternatives:
++
++    update-java-alternatives --set <jname>
++
++- Set all runtime and development tools to auto mode:
++
++    update-java-alternatives --auto
diff --cc debian/README.source
index 000000000,000000000..87678eb78
new file mode 100644
--- /dev/null
+++ b/debian/README.source
@@@ -1,0 -1,0 +1,76 @@@
++Content of source package
++=========================
++"openjdk-8" is a composite source package:
++- Multiple tarballs, one from each Oracle mercurial repository:
++  corba / hotspot / jaxp / jaxws / jdk-dfsg / langtools / openjdk
++- Tarball from JamVM.
++- Debian packaging scripts and patches.
++
++Building a new OpenJDK-8 Debian package
++=======================================
++Pre-requisites:
++- Mercurial
++- Bazaar
++  sudo aptitude install mercurial bzr
++
++0) Clone the OpenJDK debian packaging:
++   bzr branch lp:~openjdk/openjdk/openjdk7 openjdk7/
++   or update an existing repository clone:
++   (cd openjdk7/; bzr pull --remember lp:~openjdk/openjdk/openjdk7)
++
++1) Clone the IcedTea-7 HG:
++   hg clone http://icedtea.classpath.org/hg/icedtea7 icedtea-2.1.1/
++   or update an existing repository clone:
++   (cd icedtea-2.1.1/; hg pull -u)
++
++2) Download the OpenJDK upstream tarballs and move them to $BUILD/
++   export BUILD=7u3
++   mkdir -p $BUILD/
++   (
++    cd icedtea-2.1.1/
++    sh autogen.sh && ./configure --enable-jamvm
++    make download
++   )
++
++   mv icedtea-2.1.1/*.tar.gz $BUILD/
++   (
++    cd icedtea-2.1.1/
++    make distclean
++   )
++
++3) Remove files with unclear licenses from jdk.tar.gz:
++   sh openjdk7/generate-dfsg-zip.sh $BUILD/jdk.tar.gz
++   sh openjdk7/generate-dfsg-zip.sh $BUILD/langtools.tar.gz
++
++4) Check the tarball names and versions (for the original directory name)
++   in openjdk7/generate-debian-orig.sh and the package version in
++   openjdk7/changelog
++
++5) Generate the "orig" directory and the "orig+debian" directory
++   sh openjdk7/generate-debian-orig.sh
++
++Support for DEB_BUILD_OPTIONS
++=============================
++As described in Debian Policy §4.9.1, the package's debian/rules supports
++the standardized environment variable DEB_BUILD_OPTIONS. This variable can
++contain several flags to change how a package is compiled and built.
++
++ * nocheck: don't run the jtreg test suites.
++ * nodocs: pass --disable-docs to the IcedTea configure script, which
++   disables Javadoc generation.
++ * nostrip: debugging symbols are not stripped from the
++   binaries during installation
++ * parallel=X: the package is built using up to X parallel processes
++ * nobootstrap: don't use gcj to bootstrap OpenJDK; use an existing
++   OpenJDK installed on the build machine instead.
++ * noaltzero: don't build the alternative Zero VM.
++ * noaltshark: don't build the alternative Shark VM.
++ * noaltjamvm: don't build the alternative JamVM VM.
++
++Sample usage:
++DEB_BUILD_OPTIONS="parallel=5 nocheck" debuild -i"(\.hg|\.bzr)"
++
++  Build with 5 parallel processes and don't run the jtreg test suites.
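++
++Another sample (illustrative, combining the flags documented above):
++DEB_BUILD_OPTIONS="parallel=2 nodocs nostrip" debuild -i"(\.hg|\.bzr)"
++
++  Build with 2 parallel processes, skip the Javadoc generation and keep
++  the debugging symbols in the binaries.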
++
++This also works with cowbuilder/pbuilder:
++DEB_BUILD_OPTIONS="parallel=5 nocheck" pdebuild -i"(\.hg|\.bzr)" --logfile ../build.log --pbuilder cowbuilder
diff --cc debian/TODO
index 000000000,000000000..e131d33b8
new file mode 100644
--- /dev/null
+++ b/debian/TODO
@@@ -1,0 -1,0 +1,10 @@@
++to remove:
++hotspot-set-compiler.diff
++hotspot-warn-no-errformat.diff
++
++evaluate:
++ld-symbolic-functions.diff still needed?
++hotspot-sparc-arch.diff
++pass-extra-flags.diff
++hotspot-disable-werror.diff
++hotspot-undefined-target-cpu.diff
diff --cc debian/accessibility-atk.properties.disabled
index 000000000,000000000..c1bdc487b
new file mode 100644
--- /dev/null
+++ b/debian/accessibility-atk.properties.disabled
@@@ -1,0 -1,0 +1,10 @@@
++#
++# The following line specifies the assistive technology classes
++# that should be loaded into the Java VM when the AWT is initialized.
++# Specify multiple classes by separating them with commas.
++# Note: the line below cannot end the file (there must be at
++# a minimum a blank line following it).
++#
++# Doesn't work, see LP: #935296
++#assistive_technologies=org.GNOME.Accessibility.AtkWrapper
++
diff --cc debian/accessibility-atk.properties.enabled
index 000000000,000000000..b58c49249
new file mode 100644
--- /dev/null
+++ b/debian/accessibility-atk.properties.enabled
@@@ -1,0 -1,0 +1,8 @@@
++#
++# The following line specifies the assistive technology classes
++# that should be loaded into the Java VM when the AWT is initialized.
++# Specify multiple classes by separating them with commas.
++# Note: the line below cannot end the file (there must be at
++# a minimum a blank line following it).
++#
++assistive_technologies=org.GNOME.Accessibility.AtkWrapper
diff --cc debian/accessibility.properties
index 000000000,000000000..e0095896c
new file mode 100644
--- /dev/null
+++ b/debian/accessibility.properties
@@@ -1,0 -1,0 +1,9 @@@
++#
++# The following line specifies the assistive technology classes
++# that should be loaded into the Java VM when the AWT is initialized.
++# Specify multiple classes by separating them with commas.
++# Note: the line below cannot end the file (there must be at
++# a minimum a blank line following it).
++#
++assistive_technologies=org.GNOME.Accessibility.JavaBridge
++
diff --cc debian/apport-hook.py
index 000000000,000000000..bc698a3a6
new file mode 100644
--- /dev/null
+++ b/debian/apport-hook.py
@@@ -1,0 -1,0 +1,46 @@@
++'''Apport package hook for openjdk-17 packages.
++
++Copyright (C) 2017 Canonical Ltd.
++Author: Tiago Stürmer Daitx '''
++
++import os
++import re
++import sys
++from apport.hookutils import *
++
++def si_units(size):
++    for unit in ['KiB', 'MiB', 'GiB']:
++        size /= 1024
++        if size < 1024:
++            break
++    return '{0:.1f} {1}'.format(size, unit)
++
++def add_info(report, ui=None):
++    attach_conffiles(report, 'openjdk-17-jre-headless', ui=ui)
++
++    if report['ProblemType'] == 'Crash' and 'ProcCwd' in report:
++        # attach the hs_err_pid<pid>.log file written by a crashing JVM
++        cwd = report['ProcCwd']
++        pid_line = re.search("Pid:\t(.*)\n", report["ProcStatus"])
++        if pid_line:
++            pid = pid_line.groups()[0]
++            path = "%s/hs_err_pid%s.log" % (cwd, pid)
++            # make sure it exists
++            if os.path.exists(path):
++                content = read_file(path)
++                # truncate if bigger than 100 KiB
++                # see LP: #1696814
++                max_length = 100*1024
++                if sys.getsizeof(content) < max_length:
++                    report['HotspotError'] = content
++                    report['Tags'] += ' openjdk-hs-err'
++                else:
++                    report['HotspotError'] = content[:max_length] + \
++                        "\n[truncated by openjdk-17 apport hook]" + \
++                        "\n[max log size is %s, file size was %s]" % \
++                        (si_units(max_length), si_units(sys.getsizeof(content)))
++                    report['Tags'] += ' openjdk-hs-err'
++            else:
++                report['HotspotError'] = "File not found: %s" % path
++        else:
++            report['HotspotError'] = "PID not found in ProcStatus entry."
diff --cc debian/buildwatch.sh
index 000000000,000000000..8521c4a60
new file mode 100644
--- /dev/null
+++ b/debian/buildwatch.sh
@@@ -1,0 -1,0 +1,67 @@@
++#! /bin/sh
++
++#
++# Output something to stdout every so often so buildd won't kill
++# the build when building
++#
++
++builddir=$1
++
++echo $$ > buildwatch.pid
++
++time_unit="m"
++timer=0
++sleep_for=3
++time_up_at=180
++upd_every=30 # use a multiple of $sleep_for
++
++reset_timer() { timer=0; }
++inc_timer() { timer=$(expr $timer + $sleep_for); }
++time_up() { [ $timer -ge $time_up_at ]; }
++can_update() { [ $(expr $timer % $upd_every) -eq 0 ]; }
++do_sleep() { sleep ${sleep_for}${time_unit} && inc_timer; }
++
++is_running() {
++    ps x | grep -v grep | egrep -qs "$@"
++    return $?
++}
++
++cleanup() {
++    # find any hs_err_pid files generated during the build and print them out
++    # this helps debugging what went wrong during builds
++    find . -type f -name 'hs_err_pid*.log' -printf "[$0] === HOTSPOT ERROR LOG ===\n[$0] %p (last modification at %t)\n" -exec cat {} \;
++}
++
++for sig in INT QUIT HUP TERM; do trap "cleanup; trap - $sig EXIT; kill -s $sig "'"$$"' "$sig"; done
++trap cleanup EXIT
++
++while ! time_up; do
++    if [ ! -f buildwatch.pid ]; then
++        echo "[$0] pidfile removed" && break
++    fi
++    if ! is_running '/make'; then
++        echo "[$0] no make process detected (build done?)" && break
++    fi
++
++    do_sleep
++    can_update || continue
++
++    new_noisy=$(ls -l test/jtreg_output-* 2>&1 | md5sum)
++    new_quiet=$(ls -l $builddir/openjdk*/build/*/tmp/rt-orig.jar $builddir/openjdk*/build/*/lib/tools.jar $builddir/openjdk*/build/*/lib/ct.sym 2>&1 | md5sum)
++    if [ -n "$old_noisy" -a "$old_noisy" != "$new_noisy" ]; then
++        # jtreg updated test files, so it should be updating stdout on its own:
++        # keep quiet and restart timer
++        reset_timer
++    elif [ -n "$old_quiet" -a "$old_quiet" != "$new_quiet" ]; then
++        reset_timer
++        echo "[$0] assembling jar file ..."
++    elif is_running '/cc1|jar|java|gij'; then
++        echo "[$0] compiler/java/jar running ..."
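++        # a compiler or VM process is still alive, so the build is making
++        # progress; push the watchdog deadline out again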
++ reset_timer ++ fi ++ old_noisy=$new_noisy ++ old_quiet=$new_quiet ++done ++ ++echo "[$0] exiting" ++rm -f buildwatch.pid diff --cc debian/changelog index 000000000,000000000..5176369e1 new file mode 100644 --- /dev/null +++ b/debian/changelog @@@ -1,0 -1,0 +1,5519 @@@ ++openjdk-17 (17.0.8+7-1~deb12u1) bookworm-security; urgency=medium ++ ++ * Rebuild for bookworm ++ ++ -- Moritz Mühlenhoff Mon, 24 Jul 2023 11:57:34 +0200 ++ ++openjdk-17 (17.0.8+7-1) unstable; urgency=high ++ ++ * OpenJDK 17.0.8 release, build 7. ++ - CVE-2023-22006, CVE-2023-22036, CVE-2023-22041, CVE-2023-22044, ++ CVE-2023-22045, CVE-2023-22049, CVE-2023-25193. ++ - Release notes: ++ https://www.oracle.com/java/technologies/javase/17-0-8-relnotes.html#R17_0_8 ++ ++ * Don't run the tests on powerpc, hangs on the buildd. ++ * Refresh patches. ++ ++ -- Matthias Klose Wed, 19 Jul 2023 08:29:02 +0200 ++ ++openjdk-17 (17.0.8~6-5) unstable; urgency=medium ++ ++ * Revert back to the riscv64 hotspot patch to v7. ++ ++ -- Matthias Klose Wed, 12 Jul 2023 14:33:08 +0200 ++ ++openjdk-17 (17.0.8~6-4) unstable; urgency=medium ++ ++ [ Matthias Klose ] ++ * Update the riscv64 hotspot patch to v9. ++ * Run the hotspot tests on riscv64. ++ * Link with --no-as-needed. Closes: #1031521. ++ * d/rules: Remove EXTRA_.*FLAGS_JDK macros. ++ * Fix FTCBFS: Add libffi-dev:native to B-D (Helmut Grohne). ++ ++ [ Vladimir Petko ] ++ * Disable runtime/jni/nativeStack/TestNativeStack.java for armhf pending ++ upstream fix. ++ ++ -- Matthias Klose Mon, 10 Jul 2023 16:41:54 +0200 ++ ++openjdk-17 (17.0.8~6-3) unstable; urgency=medium ++ ++ [ Vladimir Petko ] ++ * Use libtestng7-java as jtreg6 dependency as TestNG 7.x is required ++ at runtime. ++ * Regenerate the control file. ++ ++ -- Matthias Klose Sat, 01 Jul 2023 09:19:52 +0200 ++ ++openjdk-17 (17.0.8~6-2) unstable; urgency=medium ++ ++ * Provide versioned java-runtime, java-runtime-headless, java-sdk ++ and java-sdk-headless virtual packages (Emmanuel Bourg). Closes: #1023869. ++ * Install jhsb binary and man page on riscv64. ++ * Bump standards version. ++ ++ -- Matthias Klose Thu, 29 Jun 2023 07:23:06 +0200 ++ ++openjdk-17 (17.0.8~6-1) experimental; urgency=medium ++ ++ * OpenJDK 17.0.8 early access, build 6. ++ * Bump debhelper version to 11. ++ ++ -- Matthias Klose Tue, 27 Jun 2023 12:05:37 +0200 ++ ++openjdk-17 (17.0.7+7-2) unstable; urgency=medium ++ ++ [ Vladimir Petko ] ++ * d/rules: backport testng selection logic. ++ ++ [ Matthias Klose ] ++ * Apply the backport patch for 8276799 (RISC-V Hotspot). ++ * Build both JREs (hotspot and zero) on riscv64. ++ ++ -- Matthias Klose Mon, 26 Jun 2023 15:58:27 +0200 ++ ++openjdk-17 (17.0.7+7-1) unstable; urgency=high ++ ++ * OpenJDK 17.0.7 release, build 7. ++ - CVE-2023-21930, CVE-2023-21937, CVE-2023-21938, CVE-2023-21939, ++ CVE-2023-21954, CVE-2023-21967, CVE-2023-21968. ++ - Release notes: ++ https://mail.openjdk.org/pipermail/jdk-updates-dev/2023-April/021899.html ++ ++ [ Vladimir Petko ] ++ * Refresh patches. ++ * debian/copyright: Convert to machine readable format. ++ * Update watch file. ++ * Update tag and version handling in the rules file. ++ * debian/JB-jre-headless.postinst.in: trigger ca-certificates-java after ++ the JRE is set up. ++ * d/control: add jtreg6 dependencies, regenerate control. ++ * d/rules: only compile google tests when with_check is enabled, disable them ++ for bullseye and jammy. ++ * d/rules: always use jtreg6. ++ * d/p/exclude-broken-tests.patch: add OpenJDK 17 failures. 
++ * d/p/*: add patches for jtreg tests: ++ - disable-thumb-assertion.patch: fix JDK-8305481. ++ - update-assertion-for-armhf.patch: fix JDK-8305480. ++ - misalign-pointer-for-armhf.patch: packaging-specific patch to fix test ++ - failure introduced by d/p/m68k-support.diff. ++ - log-generated-classes-test.patch: workaround JDK-8166162. ++ - update-permission-test.patch: add security permissions for testng 7. ++ - ldap-timeout-test-use-ip.patch, test-use-ip-address.patch: Ubuntu-specific ++ - patches to workaround missing DNS resolver on the build machines. ++ - exclude_broken_tests.patch: quarantine failing tests. ++ * d/t/{jdk,hotspot,jaxp,lantools}: run tier1 and tier2 jtreg tests only, ++ * add test options from OpenJDK Makefile, patch problem list to exclude ++ architecture-specific failing tests. ++ * d/t/*: fix test environment: add missing -nativepath (LP: #2001563). ++ * d/t/jdk: provide dbus session for the window manager (LP: #2001576). ++ * d/t/jtreg-autopkgtest.in: pass JTREG home to locate junit.jar, regenerate ++ * d/t/jtreg-autopkgtest.sh (LP: #2016206). ++ * d/rules: pack external debug symbols with build-id, do not strip JVM shared ++ libraries (LP: #2012326, LP: #2016739). ++ * drop d/p/{jaw-classpath.diff, jaw-optional.diff}: the atk wrapper is ++ disabled and these patches cause class data sharing tests to fail. ++ LP: #2016194. ++ ++ -- Matthias Klose Tue, 06 Jun 2023 13:36:52 +0200 ++ ++openjdk-17 (17.0.6+10-1) unstable; urgency=high ++ ++ * OpenJDK 17.0.6 release, build 10. ++ - CVE-2023-21835, CVE-2023-21843 ++ - Release notes: ++ https://www.oracle.com/java/technologies/javase/17-0-6-relnotes.html ++ ++ [ Vladimir Petko ] ++ * debian/patches/*: Refresh patches for the new release and drop unused ++ patches. ++ * debian/rules: add lunar to jtreg version selection. ++ ++ -- Matthias Klose Thu, 26 Jan 2023 11:36:16 +0100 ++ ++openjdk-17 (17.0.5+8-2) unstable; urgency=medium ++ ++ * Fix the binary-indep only build. ++ ++ -- Matthias Klose Wed, 19 Oct 2022 16:23:50 +0200 ++ ++openjdk-17 (17.0.5+8-1) unstable; urgency=high ++ ++ * OpenJDK 17.0.5+8 (release). ++ * Security fixes ++ - JDK-8289366: Improve HTTP/2 client usage. ++ - JDK-8288508: Enhance ECDSA usage. ++ - JDK-8286918: Better HttpServer service. ++ - JDK-8287446: Enhance icon presentations. ++ - JDK-8286910: Improve JNDI lookups. ++ - JDK-8286511: Improve macro allocation. ++ - JDK-8286526: Improve NTLM support. ++ - JDK-8286533: Key X509 usages. ++ - JDK-8286077: Wider MultiByte conversions. ++ - JDK-8286519: Better memory handling. ++ - JDK-8285662: Better permission resolution. ++ - JDK-8282252: Improve BigInteger/Decimal validation. ++ * Build using GCC 12 in recent development distros. ++ ++ -- Matthias Klose Wed, 19 Oct 2022 06:59:58 +0200 ++ ++openjdk-17 (17.0.4+8-1) unstable; urgency=high ++ ++ * OpenJDK 17.0.4+8 (release). ++ * Security fixes ++ - JDK-8272243: Improve DER parsing. ++ - JDK-8272249: Better properties of loaded Properties. ++ - JDK-8273056, JDK-8283875, CVE-2022-21549: java.util.random does not ++ correctly sample exponential or Gaussian distributions. ++ - JDK-8277608: Address IP Addressing. ++ - JDK-8281859, CVE-2022-21540: Improve class compilation. ++ - JDK-8281866, CVE-2022-21541: Enhance MethodHandle invocations. ++ - JDK-8283190: Improve MIDI processing. ++ - JDK-8284370: Improve zlib usage. ++ - JDK-8285407, CVE-2022-34169: Improve Xalan supports. ++ * Disable the reproducible-copyright-headers patch. ++ * Only try to re-run failed tests once instead of three times. 
++ ++ -- Matthias Klose Wed, 20 Jul 2022 18:04:41 +0200 ++ ++openjdk-17 (17.0.3+7-1) unstable; urgency=high ++ ++ * OpenJDK 17.0.3+7 (release). ++ * Security fixes ++ - JDK-8269938: Enhance XML processing passes redux. ++ - JDK-8270504, CVE-2022-21426: Better XPath expression handling. ++ - JDK-8272255: Completely handle MIDI files. ++ - JDK-8272261: Improve JFR recording file processing. ++ - JDK-8272588: Enhanced recording parsing. ++ - JDK-8272594: Better record of recordings. ++ - JDK-8274221: More definite BER encodings. ++ - JDK-8275082, JDK-8278008, CVE-2022-21476: Update XML Security for Java ++ to 2.3.0. ++ - JDK-8275151, CVE-2022-21443: Improved Object Identification. ++ - JDK-8277227: Better identification of OIDs. ++ - JDK-8277233, CVE-2022-21449: Improve ECDSA signature support. ++ - JDK-8277672, CVE-2022-21434: Better invocation handler handling. ++ - JDK-8278356: Improve file creation. ++ - JDK-8278449: Improve keychain support. ++ - JDK-8278798: Improve supported intrinsic. ++ - JDK-8278805: Enhance BMP image loading. ++ - JDK-8278972, CVE-2022-21496: Improve URL supports. ++ - JDK-8281388: Change wrapping of EncryptedPrivateKeyInfo. ++ * Refresh patches. ++ ++ -- Matthias Klose Mon, 02 May 2022 20:04:05 +0200 ++ ++openjdk-17 (17.0.2+8-1) unstable; urgency=high ++ ++ * OpenJDK 17.0.2+8 (release). ++ * Addresses security issues: CVE-2022-21366, CVE-2022-21365, CVE-2022-21360, ++ CVE-2022-21341, CVE-2022-21340, CVE-2022-21305, CVE-2022-21299, ++ CVE-2022-21296, CVE-2022-21294, CVE-2022-21293, CVE-2022-21291, ++ CVE-2022-21283, CVE-2022-21282, CVE-2022-21277, CVE-2022-21248. ++ ++ -- Matthias Klose Thu, 20 Jan 2022 17:13:47 +0100 ++ ++openjdk-17 (17.0.1+12-1) unstable; urgency=medium ++ ++ * OpenJDK 17.0.1+12 (release). ++ * Remove patches applied upstream. ++ ++ -- Matthias Klose Wed, 20 Oct 2021 16:47:06 +0200 ++ ++openjdk-17 (17+35-1) unstable; urgency=medium ++ ++ * Fix JDK-8272472, ftbfs with glibc 2.24. ++ ++ -- Matthias Klose Wed, 15 Sep 2021 07:22:52 +0200 ++ ++openjdk-17 (17~35ea-1) unstable; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 35 (first release candidate). ++ ++ -- Matthias Klose Fri, 13 Aug 2021 13:54:06 +0200 ++ ++openjdk-17 (17~33ea-1) unstable; urgency=high ++ ++ * OpenJDK 17 snapshot, build 33. ++ ++ -- Matthias Klose Sat, 31 Jul 2021 09:22:45 +0200 ++ ++openjdk-17 (17~32ea-1) unstable; urgency=high ++ ++ * OpenJDK 17 snapshot, build 32. ++ * Security fixes: ++ - JDK-8256157: Improve bytecode assembly. ++ - JDK-8256491: Better HTTP transport. ++ - JDK-8258432, CVE-2021-2341: Improve file transfers. ++ - JDK-8260453: Improve Font Bounding. ++ - JDK-8260960: Signs of jarsigner signing. ++ - JDK-8260967, CVE-2021-2369: Better jar file validation. ++ - JDK-8262380: Enhance XML processing passes. ++ - JDK-8262403: Enhanced data transfer. ++ - JDK-8262410: Enhanced rules for zones. ++ - JDK-8262477: Enhance String Conclusions. ++ - JDK-8262967: Improve Zip file support. ++ - JDK-8264066, CVE-2021-2388: Enhance compiler validation. ++ - JDK-8264079: Improve abstractions. ++ - JDK-8264460: Improve NTLM support. ++ ++ -- Matthias Klose Mon, 26 Jul 2021 11:25:32 +0200 ++ ++openjdk-17 (17~31ea-1) unstable; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 31. ++ * Encode the early-access status into the package version. LP: #1934895. ++ ++ -- Matthias Klose Sat, 17 Jul 2021 14:25:02 +0200 ++ ++openjdk-17 (17~29-1) unstable; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 29. ++ * Update watch file. ++ * Prepare to build with jtreg6, where available. 
++ ++ -- Matthias Klose Thu, 01 Jul 2021 16:42:23 +0200 ++ ++openjdk-17 (17~27-1) unstable; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 27. ++ * Only build using lto with GCC 11. ++ * Build using GCC 11 in recent distributions. ++ * Update VCS attributes. ++ * Disable runnning the tests, requires not yet packaged jtreg6. ++ * Remove rimd, removed upstream. ++ ++ -- Matthias Klose Fri, 18 Jun 2021 15:06:18 +0200 ++ ++openjdk-17 (17~24-1) unstable; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 24. ++ * Drop the work around for JDK 8211105. ++ * Remove jaotc (the experimental JIT compiler), removed upstream. ++ * Add an (unapplied) patch to replace OASIS header files with ones ++ imported from NSPR and NSS. See #985765. Not reviewed, not applying. ++ ++ -- Matthias Klose Thu, 27 May 2021 11:26:59 +0200 ++ ++openjdk-17 (17~19-1) unstable; urgency=high ++ ++ * OpenJDK 17 snapshot, build 19. ++ - Fix JDK-8250568: Less ambiguous processing (CVE-2021-2161). ++ - Fix JDK-8249906: Enhance opening JARs (CVE-2021-2163). ++ ++ -- Matthias Klose Thu, 22 Apr 2021 09:51:43 +0200 ++ ++openjdk-17 (17~17-1) unstable; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 17. ++ * Refresh patches. ++ * Prefer to build using openjdk-17 instead of -16. Closes: #986526. ++ ++ -- Matthias Klose Thu, 08 Apr 2021 09:37:40 +0200 ++ ++openjdk-17 (17~15-1) unstable; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 15. ++ * Fix another upstream typo to build with external harfbuzz. Closes: #985668. ++ * Disable running the tests on mipsel and mips64el. Times out, and if it ++ succeeds, it taken at least five days to run. ++ ++ -- Matthias Klose Thu, 25 Mar 2021 10:31:15 +0100 ++ ++openjdk-17 (17~14-1) unstable; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 14. ++ * Don't use the triplet-prefixed binutils tools for backports. ++ * Simplify compiler selection for backports. ++ * Fix build with older glibc versions. See JDK-8262501. ++ * Apply untested patch suggested to fix mips issue. See #983878. ++ ++ -- Matthias Klose Thu, 18 Mar 2021 19:42:16 +0100 ++ ++openjdk-17 (17~11-1) unstable; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 11. ++ * Configure --with-copyright-year. Addresses: #956154. ++ * Restore the libpcsclite-dlopen patch. Closes: #983465. ++ * Don't run the testsuite on armel and riscv64. Crashing the buildds. ++ ++ -- Matthias Klose Thu, 25 Feb 2021 10:33:32 +0100 ++ ++openjdk-17 (17~10-2) unstable; urgency=medium ++ ++ * Fix the build logic, jaotc and jhsdb tools not available on all archs. ++ * Ship the jfc files used by jfr. ++ * reproducible-build-jmod.diff: Fall back to the unpatched behavior ++ for backports. ++ * Only build with system harfbuzz for recent releases. ++ ++ -- Matthias Klose Sat, 20 Feb 2021 12:02:45 +0100 ++ ++openjdk-17 (17~10-1) unstable; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 10. ++ * Fix building with OpenJDK 17 as the boot jdk. ++ * Build with Rules-Requires-Root: no. ++ * Move libawt_xawt.so, libjawt.so into the jre package. Addresses: #908058. ++ * Move the jfr binary from -jre-headless to -jdk-headless. Development tool. ++ ++ -- Matthias Klose Fri, 19 Feb 2021 13:12:19 +0100 ++ ++openjdk-17 (17~9-1) unstable; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 9. ++ * Enable running the tests. ++ * Remove the disable-doclint-by-default patch. Was not applied anymore ++ in 17. Closes: #982521. ++ ++ -- Matthias Klose Thu, 11 Feb 2021 10:13:25 +0000 ++ ++openjdk-17 (17~8-1) unstable; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 8. 
++ * Don't try to apply the removed alpha-float-const patch. ++ * Drop the powerpcspe packaging bits. ++ ++ -- Matthias Klose Thu, 04 Feb 2021 14:35:28 +0100 ++ ++openjdk-17 (17~7-1) unstable; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 7. ++ * Update watch file. ++ * Use debugedit to generate unique build-id's and remove the openjdk-N-dbg ++ file conflicts. ++ * Remove KFreeBSD build support and patches, not updated since OpenJDK 8. ++ * Remove obsolete patches: alpha-float-const, libpcsclite-dlopen, ++ parallel-build-fix, s390x-thread-stack-size. ++ ++ -- Matthias Klose Thu, 28 Jan 2021 14:52:27 +0100 ++ ++openjdk-17 (17~5-1) unstable; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 5. ++ ++ -- Matthias Klose Tue, 19 Jan 2021 10:29:50 +0100 ++ ++openjdk-17 (17~4-0ubuntu1) hirsute; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 4. ++ ++ -- Matthias Klose Sat, 09 Jan 2021 17:25:46 +0100 ++ ++openjdk-17 (17~3-1) experimental; urgency=medium ++ ++ * OpenJDK 17 snapshot, build 3. ++ * OpenJDK 17 is supposed to be the next OpenJDK LTS release seeing extended ++ updates and security support. ++ ++ -- Matthias Klose Mon, 28 Dec 2020 13:57:12 +0100 ++ ++openjdk-16 (16~29-1) unstable; urgency=medium ++ ++ * OpenJDK 16 snapshot, build 29. ++ ++ -- Matthias Klose Mon, 28 Dec 2020 12:44:30 +0100 ++ ++openjdk-16 (16~27-2) unstable; urgency=medium ++ ++ * Fix installation on sparc64. ++ * Link with libatomic for more zero builds. ++ * Re-enable zero on armhf. ++ ++ -- Matthias Klose Fri, 04 Dec 2020 10:01:45 +0100 ++ ++openjdk-16 (16~27-1) unstable; urgency=medium ++ ++ * OpenJDK 16 snapshot, build 27. ++ * Bump standards version. ++ ++ -- Matthias Klose Thu, 03 Dec 2020 16:42:51 +0100 ++ ++openjdk-16 (16~14-1) experimental; urgency=medium ++ ++ * OpenJDK 16 snapshot, build 14. ++ * Build with system harfbuzz. ++ ++ -- Matthias Klose Sat, 07 Nov 2020 14:35:22 +0100 ++ ++openjdk-15 (15.0.1+9-3) unstable; urgency=medium ++ ++ * Don't enabled shenandoahgc explicitly. ++ ++ -- Matthias Klose Sat, 07 Nov 2020 08:54:13 +0100 ++ ++openjdk-15 (15.0.1+9-2) unstable; urgency=medium ++ ++ * Update debian/copyright. ++ * Configure --with-jvm-features=shenandoahgc for hotspot builds. ++ LP: #1902029. ++ * Replace the upstream test suite in the autopkg tests with two superficial ++ tests. It doesn't make any sense to run the whole test suite again, ++ after running it during the build. ++ ++ -- Matthias Klose Thu, 05 Nov 2020 16:55:34 +0100 ++ ++openjdk-15 (15.0.1+9-1) unstable; urgency=medium ++ ++ * OpenJDK 15.0.1 release, build 9. ++ * Build just zero on sparc64 (Adrian Glaubitz). Closes: #965342. ++ * Call strip-nondeterminism before computing jmod hashes (Julian Gilbey). ++ * Don't run the jdk tests as an autopkg test, taking too long. ++ Closes: #970606. ++ ++ -- Matthias Klose Wed, 21 Oct 2020 16:46:35 +0200 ++ ++openjdk-15 (15+36-1) unstable; urgency=medium ++ ++ * OpenJDK 15 release, build 36. ++ ++ -- Matthias Klose Wed, 16 Sep 2020 14:08:36 +0200 ++ ++openjdk-15 (15~32-2) unstable; urgency=medium ++ ++ * debian/copyright (remove licenses not found anymore in the sources): ++ - Little CMS, libpng, GIFLIB. ++ * Stop building zero on armhf, ftbfs. ++ ++ -- Matthias Klose Sun, 19 Jul 2020 10:36:08 +0200 ++ ++openjdk-15 (15~32-1) unstable; urgency=medium ++ ++ * OpenJDK 15 snapshot, build 32. ++ * Build again with -march=zEC12 on Ubuntu/s390x. ++ * Re-add missing parts of the riscv64 patch. ++ * Run the tests on release architectures. 
++ ++ -- Matthias Klose Fri, 17 Jul 2020 15:30:05 +0200 ++ ++openjdk-15 (15~19-1) unstable; urgency=medium ++ ++ * OpenJDK 15 snapshot, build 19. ++ * Configure --with-jtreg=/usr/share/jtreg. ++ * Build with -march=z13 -mtune=z15 on Ubuntu/s390x. ++ ++ -- Matthias Klose Thu, 16 Apr 2020 11:53:19 +0200 ++ ++openjdk-15 (15~18-1) unstable; urgency=medium ++ ++ * OpenJDK 15 snapshot, build 18. ++ - Stop shipping the rmic command. ++ * Make autopkgtests cross-test-friendly (Steve Langasek). LP: #1861467. ++ * d/tests/jtreg-autopkgtest.in: keep generated hs_err log files ++ with test artifacts to ease later debugging (Tiago Stürmer Daitx). ++ * d/tests/jtdiff-autopkgtest.in: set default vm to correctly locate (Tiago ++ Stürmer Daitx). ++ * jhsdb isn't built on sh4 (Adrian Glaubitz). Addresses: #951774. ++ * Enable zero for armhf again. ++ * Enable running the tests again, jtreg now updated to 6.0. ++ ++ -- Matthias Klose Mon, 13 Apr 2020 21:16:21 +0200 ++ ++openjdk-15 (15~11-1) unstable; urgency=medium ++ ++ * OpenJDK 15 snapshot, build 11. ++ * Don't apply the disable-doclint-by-default patch; it should be possible ++ to disable doclint via command-line options. ++ * Don't apply the reproducible-javadoc-timestamp patch. Needs an update. ++ * Refresh patches. ++ * Update all the provides for 15. ++ ++ -- Matthias Klose Fri, 21 Feb 2020 09:48:57 +0100 ++ ++openjdk-14 (14~36-1) unstable; urgency=medium ++ ++ * OpenJDK 14 snapshot, build 36 (first release candidate). ++ * Regenerate the control file. Closes: #942783. ++ * Fix FTCBFS (Helmut Grohne). Closes: #949460. ++ - Missing Build-Depends: zlib1g-dev:native. ++ - Use triplet-prefixed objcopy and strip. ++ * Bump standards version. ++ ++ -- Matthias Klose Tue, 11 Feb 2020 09:08:15 +0100 ++ ++openjdk-14 (14~32-1) unstable; urgency=medium ++ ++ * OpenJDK 14 snapshot, build 32. ++ * Make the generated character data source files reproducible (Emmanuel ++ Bourg). Addresses: #933339. ++ * Make the generated module-info.java files reproducible (Emmanuel Bourg). ++ Addresses: #933342. ++ * Make the generated copyright headers reproducible (Emmanuel Bourg). ++ Addresses: #933349. ++ * Make the build user reproducible (Emmanuel Bourg). Addresses: #933373. ++ ++ -- Matthias Klose Thu, 16 Jan 2020 23:31:42 +0100 ++ ++openjdk-14 (14~31-1) unstable; urgency=medium ++ ++ * OpenJDK 14 snapshot, build 31. ++ * openjdk-14-dbg: Conflict with openjdk-13-dbg as well. Closes: #942783. ++ * Configure with --enable-deprecated-ports=yes on sparc64. Closes: #946723. ++ * Disable Zero on sparc64. Closes: #946766. ++ ++ -- Matthias Klose Thu, 09 Jan 2020 15:19:43 +0100 ++ ++openjdk-14 (14~27-1) unstable; urgency=medium ++ ++ * OpenJDK 14 snapshot, build 27. ++ * Fix the jtreg consistency check when building without jtreg. ++ * Don't call dh_strip_nondeterminism when building for older releases. ++ * Fix disabling the zero build on arm64 on trusty. ++ * Re-enable running the tests. ++ ++ -- Matthias Klose Sat, 14 Dec 2019 11:21:53 +0100 ++ ++openjdk-14 (14~19-1) unstable; urgency=medium ++ ++ * OpenJDK 14 snapshot, build 19. ++ ++ -- Matthias Klose Thu, 17 Oct 2019 08:26:17 +0200 ++ ++openjdk-14 (14~18-1) experimental; urgency=medium ++ ++ * OpenJDK 14 snapshot, build 18. ++ * Bump standards version. ++ * Use dh_strip_nondeterminism (Emmanuel Bourg). Addresses: #933389. ++ * Fix 8230708, server build on sparc64 (Adrian Glaubitz). Closes: #939565. ++ * Fix FTBFS with DEB_BUILD_PROFILES=nocheck (Helmut Grohne). ++ Addresses: #939521. ++ * Fix debug and src symlinks.
Addresses: #893134, #910694, #910696. ++ ++ -- Matthias Klose Thu, 10 Oct 2019 11:05:23 +0200 ++ ++openjdk-14 (14~14-2) experimental; urgency=medium ++ ++ * OpenJDK 14 snapshot, build 14. ++ * Don't build zero on armhf, still broken. ++ ++ -- Matthias Klose Thu, 12 Sep 2019 17:54:43 +0200 ++ ++openjdk-14 (14~13-1) experimental; urgency=medium ++ ++ * OpenJDK 14 snapshot, build 13. ++ * Properly generate Breaks: rules for bionic (fix typo). ++ * Remove libgtk-3-dev from build-deps: libgtk-3-dev is not actually ++ required, the package builds fine without it; libgtk2.0-0 or libgtk-3-0 ++ should be explicitly declared instead in bdeps and tests; ++ libxrandr-dev should be explicitly added as it is required and was ++ being included due to the libgtk-3-dev dependency. ++ * Set minimum dependency on jtreg based on testsuite requirements. ++ * Fail during pre-build if the installed jtreg version is lower than ++ the minimum required version. ++ * Improve and fix build tests and autopkgtests: ++ - Depend on default-jre-headless so jtreg will use the ++ JRE from /usr/lib/jvm/default-java; remove JT_JAVA exports as it ++ no longer needs to be set. ++ - Update debian/tests/hotspot,jdk,langtools to ignore ++ jtreg-autopkgtest.sh return code. ++ - Create debian/tests/jtdiff-autopkgtest.in as it depends ++ on debian/rules variables. ++ - debian/tests/jtreg-autopkgtest.sh: ++ + Enable retry of failed tests to trim out flaky tests. ++ + Fix unbound variable. ++ + Keep .jtr files from failed tests only. ++ - debian/tests/jtdiff-autopkgtest.sh: ++ + Fail only if an actual regression is detected. ++ + Add the super-diff comparison from jtdiff. ++ - debian/rules: ++ + Preserve all JTreport directories in the test output ++ directory. ++ + Use JDK_DIR instead of JDK_TO_TEST for autopkgtest ++ generation. ++ + Package all .jtr files from JTwork as jtreg-autopkgtest.sh ++ makes sure it contains only failing tests. ++ * Bump standards version. ++ * Try to build zero on armhf again. ++ ++ -- Matthias Klose Fri, 06 Sep 2019 08:22:27 +0200 ++ ++openjdk-14 (14~6-0ubuntu1) eoan; urgency=medium ++ ++ * OpenJDK 14 snapshot, build 6. ++ * Fix dependency generation on the libjpeg runtime. Addresses: #927965. ++ * Drop dependency on transitional libgl1-mesa-glx package. Addresses: #930611. ++ * Fix more build issues for Ubuntu precise builds. ++ * Don't install jhsdb on riscv64 (Aurelien Jarno). ++ * Tighten dependency on jtreg. ++ ++ -- Matthias Klose Thu, 18 Jul 2019 17:44:41 +0200 ++ ++openjdk-14 (14~3-1) experimental; urgency=medium ++ ++ * OpenJDK 14 snapshot, build 3. ++ * Add riscv64 support for zero (Ed Nevill). ++ * Support using the Java ATK wrapper (Samuel Thibault). Addresses: #900912. ++ - patches/jaw-classpath.diff: Fix finding the Java ATK wrapper. ++ - patches/jaw-optional.diff: Make failing to load the Java ATK wrapper ++ non-fatal. ++ * Apply proposed fix for zero builds. ++ ++ -- Matthias Klose Fri, 28 Jun 2019 12:13:08 +0200 ++ ++openjdk-14 (14~1-1) experimental; urgency=medium ++ ++ * OpenJDK 14 snapshot, build 1. ++ ++ -- Matthias Klose Fri, 14 Jun 2019 10:09:16 +0200 ++ ++openjdk-13 (13~25-1) experimental; urgency=medium ++ ++ * OpenJDK 13 snapshot, build 25. ++ ++ -- Matthias Klose Fri, 14 Jun 2019 09:47:38 +0200 ++ ++openjdk-13 (13~24-1) experimental; urgency=medium ++ ++ * OpenJDK 13 snapshot, build 24. ++ ++ -- Matthias Klose Fri, 07 Jun 2019 09:58:37 +0200 ++ ++openjdk-13 (13~23-1) experimental; urgency=medium ++ ++ * OpenJDK 13 snapshot, build 23. ++ * Fix version number for updated jquery file.
++ * Print some information about the host configuration before starting ++ the build. ++ * Add a watch file (Emmanuel Bourg). ++ * Apply the updated patch for m68k support (Adrian Glaubitz). ++ Closes: #928167. ++ * Apply fix for 8222252, fixing 32-bit builds. ++ ++ -- Matthias Klose Fri, 31 May 2019 10:12:20 +0200 ++ ++openjdk-13 (13~22-1) experimental; urgency=medium ++ ++ * OpenJDK 13 snapshot, build 22. ++ * Fix src.zip symlink. Addresses: #923118. ++ * Update patch for m68k support (Adrian Glaubitz). Closes: #928167. ++ * Configure with --with-version-pre='ea' for upstream tags which are ++ not upstream releases (has to be set manually). ++ * Refresh patches. ++ ++ -- Matthias Klose Mon, 27 May 2019 19:44:41 +0200 ++ ++openjdk-13 (13~18-1) experimental; urgency=medium ++ ++ * OpenJDK 13 snapshot, build 18. ++ * Add Breaks to the openjdk-13-jre-headless package: ++ - For unattended upgrades: jetty9, netbeans, tomcat8, visualvm. ++ - For eclipse 3.8 removal: eclipse-platform. Addresses: #925071. ++ - For configuration with the vendor flag: libreoffice-core. ++ * Add more -dbg package conflicts. Closes: #927745. ++ * Class data sharing is enabled during the build where available. ++ Just use the shipped classes.jsa files. Closes: #927441. ++ ++ -- Matthias Klose Thu, 25 Apr 2019 10:56:56 +0200 ++ ++openjdk-13 (13~17-2) experimental; urgency=medium ++ ++ * Fix the build of the -doc package. ++ * Don't build zero on armhf for now. ++ ++ -- Matthias Klose Fri, 19 Apr 2019 03:00:51 +0200 ++ ++openjdk-13 (13~17-1) experimental; urgency=medium ++ ++ [ Matthias Klose ] ++ * Build the alternate zero VM using the just-built hotspot VM. ++ * Remove the icedtea-sound/pulseaudio build support. ++ * Remove some xulrunner build bits. ++ * Annotate the bootstrap dependency with :native. ++ * Fix installation of the -doc package. Closes: #926845, #926917. ++ ++ [ Tiago Stürmer Daitx ] ++ * Install swing.properties into /conf instead of /lib. ++ ++ -- Matthias Klose Thu, 18 Apr 2019 06:33:56 +0200 ++ ++openjdk-13 (13~14-1) experimental; urgency=medium ++ ++ [ Matthias Klose ] ++ * OpenJDK 13 build 14. ++ * Configure with vendor flags. ++ * Update package provides for 12 and 13. ++ ++ [ Tiago Stürmer Daitx ] ++ * Revert to GTK2 as default since GTK3 still has padding and ++ component issues. LP: #1770278. ++ - debian/patches/keep-gtk2-as-default.patch: revert the upstream change ++ so GTK2 is loaded before GTK3 when available. ++ - debian/rules, debian/control: Set jre to depend on libgtk2.0-0 ++ and alternatively on libgtk-3-0. ++ ++ -- Matthias Klose Thu, 28 Mar 2019 08:54:33 +0100 ++ ++openjdk-13 (13~13-1) experimental; urgency=medium ++ ++ * OpenJDK 13 build 13. ++ ++ -- Matthias Klose Thu, 21 Mar 2019 14:40:49 +0100 ++ ++openjdk-13 (13~12-1) experimental; urgency=medium ++ ++ * OpenJDK 13 build 12. ++ * Don't apply the m68k-support patch. Needs updates. ++ ++ -- Matthias Klose Sat, 16 Mar 2019 20:59:50 +0100 ++ ++openjdk-12 (12~33-1) unstable; urgency=medium ++ ++ * OpenJDK 12 build 33 (release candidate 2). ++ * Add support for DCEVM on amd64 and i386. Addresses: #921419. ++ * openjdk-12-dbg: Conflict with openjdk-11-dbg. Some object files are ++ identical across OpenJDK versions. Closes: #919790. ++ * Check for nodoc instead of nodocs in DEB_BUILD_OPTIONS. Closes: #922761. ++ ++ -- Matthias Klose Tue, 26 Feb 2019 11:24:37 +0100 ++ ++openjdk-12 (12~32-1) unstable; urgency=medium ++ ++ * OpenJDK 12 build 32. ++ * Attribute test dependencies with .
++ ++ -- Matthias Klose Thu, 14 Feb 2019 17:15:22 +0100 ++ ++openjdk-12 (12~31-1) unstable; urgency=medium ++ ++ * OpenJDK 12 build 31. ++ * Configure with vendor flags. ++ ++ -- Matthias Klose Thu, 07 Feb 2019 11:50:45 +0100 ++ ++openjdk-12 (12~30-1) unstable; urgency=medium ++ ++ * OpenJDK 12 build 30. ++ ++ -- Matthias Klose Fri, 01 Feb 2019 23:30:59 +0100 ++ ++openjdk-12 (12~29-2) unstable; urgency=medium ++ ++ * Fix merging the packaging changes. ++ ++ -- Matthias Klose Fri, 25 Jan 2019 08:38:26 +0100 ++ ++openjdk-12 (12~29-1) unstable; urgency=medium ++ ++ * OpenJDK 12 build 29. ++ * Merge packaging changes from 11.0.2. ++ * Link zero on alpha with --no-relax (Michael Cree). Closes: #920161. ++ ++ -- Matthias Klose Thu, 24 Jan 2019 09:09:16 +0100 ++ ++openjdk-12 (12~28-1) unstable; urgency=medium ++ ++ * OpenJDK 12 build 28. ++ * jhsdb isn't built on alpha. ++ ++ -- Matthias Klose Thu, 17 Jan 2019 13:28:33 +0100 ++ ++openjdk-12 (12~27-1) unstable; urgency=medium ++ ++ * OpenJDK 12 build 27. ++ * Don't run the tests on alpha, causing build failure. ++ ++ -- Matthias Klose Thu, 10 Jan 2019 12:55:21 +0100 ++ ++openjdk-12 (12~25-1) unstable; urgency=medium ++ ++ * OpenJDK 12 build 25. ++ * Fix Zero on sparc64 (Adrian Glaubitz). Closes: #917008. ++ ++ -- Matthias Klose Sat, 22 Dec 2018 01:12:39 +0100 ++ ++openjdk-12 (12~24-2) unstable; urgency=medium ++ ++ * Pull patches for 8215353 and 8215374, fixing the build on i386. ++ ++ -- Matthias Klose Fri, 14 Dec 2018 20:44:28 +0100 ++ ++openjdk-12 (12~24-1) unstable; urgency=medium ++ ++ * OpenJDK 12 build 24. ++ * Re-enable the bootcycle builds for hotspot targets. ++ * Enable bootcycle builds for zero targets. ++ ++ -- Matthias Klose Fri, 14 Dec 2018 19:18:49 +0100 ++ ++openjdk-12 (12~23-3) unstable; urgency=medium ++ ++ * Disable the bootcycle build. ++ ++ -- Matthias Klose Fri, 07 Dec 2018 01:23:44 +0100 ++ ++openjdk-12 (12~23-1) unstable; urgency=medium ++ ++ * OpenJDK 12 build 23. ++ * Update VCS attributes in the control file. Addresses: #909736. ++ * Re-enable the zero build on arm64 and ppc64el. ++ * Configure with --with-jni-libpath. ++ * Enable bootcycle builds for hotspot. ++ * Don't build the test-image target. ++ ++ -- Matthias Klose Thu, 06 Dec 2018 16:37:32 +0100 ++ ++openjdk-12 (12~22-0ubuntu2) disco; urgency=medium ++ ++ * OpenJDK 12 build 22. ++ * Disable zero build on arm64 and ppc64el, FTBFS. ++ ++ -- Matthias Klose Sun, 02 Dec 2018 21:34:27 +0100 ++ ++openjdk-11 (11.0.1+13-3) unstable; urgency=medium ++ ++ * Tighten dependency on debhelper on recent releases. Closes: #911694. ++ * Make the properties file header reproducible when SOURCE_DATE_EPOCH is ++ specified. Closes: #914278. ++ * Add SOURCE_DATE_EPOCH support to the javadoc tool. Closes: #783938. ++ * Disable AArch64 intrinsics for sin, cos and log. Closes: #910188. ++ LP: #1796982. ++ * Add support for DEB_BUILD_OPTIONS=terse. Closes: #912211. ++ ++ -- Matthias Klose Fri, 30 Nov 2018 11:40:28 +0100 ++ ++openjdk-11 (11.0.1+13-2) unstable; urgency=high ++ ++ * OpenJDK 11.0.1 release. ++ ++ [ Tiago Stürmer Daitx ] ++ * debian/rules: ++ - limit the tests that we run for the hotspot, langtools, ++ and jdk testsuites to improve build times and also to ++ prevent running unstable or failing tests. ++ - fix 'if' clause for definition of the TIME command. ++ - remove guava jar from test classpath, no longer required. ++ * debian/control, debian/control.in: add a breaks clause to ++ clojure1.8 <= 1.8.0-7ubuntu1~.
++ ++ -- Matthias Klose Wed, 17 Oct 2018 09:31:38 +0200 ++ ++openjdk-11 (11~28-3) unstable; urgency=medium ++ ++ * Build-depend on testng. ++ * Drop the jdk-freetypeScaler-crash patch. Closes: #905718. ++ * Don't run the tests on armel and mips*. Either too slow, or even ++ crashing the buildds. ++ ++ -- Matthias Klose Wed, 03 Oct 2018 04:24:27 +0200 ++ ++openjdk-11 (11~28-2) unstable; urgency=medium ++ ++ [ Matthias Klose ] ++ * Configure --with-jvm-features=zgc on amd64. Closes: #909560. ++ ++ [ Tiago Stürmer Daitx ] ++ * debian/rules: by default leave atk disabled, move the accessibility bridge ++ to recommends. LP: #1788250. LP: #1788267. ++ * debian/control.in, debian/control: move the accessibility bridge to ++ recommends, add testng and libguava-java as build dependencies. ++ * debian/rules: ++ - copy accessibility files to conf/ (thanks to Samuel Thibault). ++ - update test rules, set output to jtreg-test-output/ as test/ belongs ++ to the openjdk source, add jaxp and nashorn, use ProblemList.txt from ++ each testsuite as the exclude list for jtreg, update the regex to ++ include failed jcstress tests (in the Test.java#id123 format) in the ++ tarball, create a tarball with the JTreport directory, add the guava ++ jar file to the test classpath. ++ * debian/excludelist.jdk.jtx, excludelist.langtools.jtx: removed, tests ++ now rely on the ProblemList.txt exclusion list that is included in-tree ++ and maintained upstream. ++ ++ -- Matthias Klose Tue, 25 Sep 2018 18:14:28 +0200 ++ ++openjdk-11 (11~28-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 28. ++ ++ -- Matthias Klose Tue, 04 Sep 2018 14:37:49 +0200 ++ ++openjdk-11 (11~27-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 27. ++ ++ -- Matthias Klose Mon, 20 Aug 2018 11:30:22 +0200 ++ ++openjdk-11 (11~24-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 24. ++ * jhsdb is not available on ia64 and m68k either. ++ * Update the m68k support (Adrian Glaubitz). Closes: #904204. ++ ++ -- Matthias Klose Fri, 27 Jul 2018 05:41:39 +0200 ++ ++openjdk-11 (11~23-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 23. ++ * Explicitly build-depend on libfontconfig1-dev, explicitly depend ++ on libfontconfig1. LP: #1780151. ++ * jhsdb is not available on armel, mipsel and mips64el. Closes: #903631. ++ * Build using GCC 8. ++ * Configure with --with-native-debug-symbols=internal. ++ ++ -- Matthias Klose Thu, 19 Jul 2018 20:03:05 +0200 ++ ++openjdk-11 (11~22-2) unstable; urgency=medium ++ ++ * jhsdb is not available on mips. ++ ++ -- Matthias Klose Fri, 13 Jul 2018 07:48:13 +0200 ++ ++openjdk-11 (11~22-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 22. ++ * jhsdb is not available on powerpc, s390x and x32. ++ ++ -- Matthias Klose Thu, 12 Jul 2018 17:00:12 +0200 ++ ++openjdk-11 (11~21-2) unstable; urgency=medium ++ ++ * jaotc is only available on amd64 and arm64. ++ ++ -- Matthias Klose Sun, 08 Jul 2018 08:22:34 +0200 ++ ++openjdk-11 (11~21-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 21. ++ * Fix the jre/jdk split again. ++ ++ -- Matthias Klose Fri, 06 Jul 2018 07:10:53 +0200 ++ ++openjdk-11 (11~19-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 19. ++ ++ -- Matthias Klose Mon, 25 Jun 2018 11:04:39 +0200 ++ ++openjdk-11 (11~18-2) unstable; urgency=medium ++ ++ * Fix the parallel build with multiple VM variants. ++ * Fix installation of the jre binaries. Closes: #901674. ++ ++ -- Matthias Klose Tue, 19 Jun 2018 06:17:22 +0200 ++ ++openjdk-11 (11~18-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 18.
++ * Apply fix to build on sparc64. Closes: #901410. ++ * Build the client VM on i386 again, limiting parallel make to two cores. ++ ++ -- Matthias Klose Fri, 15 Jun 2018 14:09:40 +0200 ++ ++openjdk-11 (11~17-2) unstable; urgency=medium ++ ++ * Don't build the client VM on i386, fails since build 16. ++ ++ -- Matthias Klose Sat, 09 Jun 2018 21:03:00 +0200 ++ ++openjdk-11 (11~17-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 17. ++ ++ -- Matthias Klose Fri, 08 Jun 2018 21:14:46 +0200 ++ ++openjdk-11 (11~16-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 16. ++ ++ -- Matthias Klose Fri, 01 Jun 2018 13:21:38 +0200 ++ ++openjdk-11 (11~15-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 15. ++ ++ -- Matthias Klose Mon, 28 May 2018 10:57:30 +0200 ++ ++openjdk-11 (11~13-2) unstable; urgency=medium ++ ++ * Stop shipping the appletviewer binary. ++ ++ -- Matthias Klose Sun, 13 May 2018 20:09:17 -0400 ++ ++openjdk-11 (11~13-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 13. ++ ++ -- Matthias Klose Sun, 13 May 2018 14:41:09 -0400 ++ ++openjdk-11 (11~12-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 12. ++ * Fix FTBFS on powerpcspe, adjusting the MIN_FLOAT constant (Adrian Glaubitz). ++ Addresses: #897334. ++ * Remove classes.jsa files on package removal. Addresses: #897411. ++ ++ -- Matthias Klose Sat, 05 May 2018 17:31:20 +0200 ++ ++openjdk-11 (11~11-2) unstable; urgency=medium ++ ++ * Drop the alternative build dependency on OpenJDK 9. ++ * Fix the build on x32 again, not passing -m32 to the compiler. ++ * Restore parts of the mips-sigset patch which are not yet upstream. ++ ++ -- Matthias Klose Sat, 28 Apr 2018 22:17:34 +0200 ++ ++openjdk-11 (11~11-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 11. ++ * Stop installing the policytool desktop file. LP: #1766843. ++ ++ -- Matthias Klose Fri, 27 Apr 2018 08:23:56 +0200 ++ ++openjdk-11 (11~9-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 9. ++ ++ [ Tiago Stürmer Daitx ] ++ * debian/rules: do not compress the element-list API docs as javadoc expects ++ this file to be uncompressed when using '-link' or '-linkoffline'. ++ Closes: #895587. ++ ++ -- Matthias Klose Sun, 15 Apr 2018 06:36:41 +0200 ++ ++openjdk-11 (11~8-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 8. ++ * Update the s390x-zEC12 patch. ++ * Drop obsolete patches. Closes: #895060. ++ ++ -- Matthias Klose Sun, 08 Apr 2018 10:58:36 +0200 ++ ++openjdk-11 (11~7-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 7. ++ * Build using gtk 3.0. ++ * Re-add ia64 support (Adrian Glaubitz). Closes: #894064. ++ ++ -- Matthias Klose Mon, 02 Apr 2018 08:56:00 +0200 ++ ++openjdk-11 (11~5-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 5. ++ * Don't pass -m32/-m64 compiler flags explicitly on architectures that do ++ not understand them (James Cowgill). Closes: #893089. ++ * Update the zero-x32 patch. ++ ++ -- Matthias Klose Thu, 22 Mar 2018 07:46:26 +0800 ++ ++openjdk-11 (11~4-2) unstable; urgency=medium ++ ++ * Update the apport hook name for 11. LP: #1738579. ++ ++ -- Matthias Klose Thu, 15 Mar 2018 14:30:13 +0100 ++ ++openjdk-11 (11~4-1) unstable; urgency=medium ++ ++ * OpenJDK 11 build 4. ++ * Disable the m68k and x32 patches for now (needs an update). ++ * Build with -fpermissive on armel and armhf for now. ++ ++ -- Matthias Klose Thu, 15 Mar 2018 10:30:13 +0100 ++ ++openjdk-10 (10~46-1) unstable; urgency=medium ++ ++ * OpenJDK 10 build 46. ++ * Update patch for m68k (Adrian Glaubitz). Closes: #883570. ++ * Update patch for alpha (Adrian Glaubitz).
Closes: #885018. ++ * Drop powerpcspe patch. Closes: #883493. ++ * Work around make-4.2 MAKEFLAGS issue. Closes: #891573. ++ ++ -- Matthias Klose Wed, 14 Mar 2018 18:10:28 +0100 ++ ++openjdk-10 (10~32-1) experimental; urgency=medium ++ ++ * OpenJDK 10 build 32. ++ ++ -- Matthias Klose Sun, 19 Nov 2017 17:52:21 +0100 ++ ++openjdk-9 (9.0.1+11-1) unstable; urgency=medium ++ ++ * OpenJDK 9.0.1+11 release. ++ * Bump standards version. ++ * Configure with an empty --with-version-pre setting. LP: #1722410. ++ * Remove JamVM packaging bits. Closes: #877523. ++ * Remove Shark packaging bits. ++ * Fix java/javac/jar lockups on SMP Alpha (Michael Cree). Closes: #875288. ++ * Fix crashes in i386 applications using JNI due to the Hotspot workaround ++ for Exec Shield (Ben Hutchings). Closes: #876069. ++ * Re-enable building altzero architectures. Closes: #874292. ++ * Update the zero-sparc patch (Adrian Glaubitz). Closes: #874265. ++ * Fix recommendation of microhei/zenhei font packages. Closes: #868205. ++ * Move jmod files into the openjdk-jdk-headless package. Closes: #878272. ++ ++ -- Matthias Klose Fri, 27 Oct 2017 01:44:31 +0200 ++ ++openjdk-9 (9~b181-4) unstable; urgency=medium ++ ++ * Fix whitespace in debian/rules. Closes: #873104, #873117. ++ ++ -- Matthias Klose Thu, 24 Aug 2017 19:15:14 +0200 ++ ++openjdk-9 (9~b181-3) unstable; urgency=medium ++ ++ * Fix libjvm.so's .debug file names. ++ * Install an apport hook when building on Ubuntu and derivatives. ++ * Update the disable-doclint patch (Chris West). Closes: #866908. ++ * Configure --with-debug-level=release on m68k as well (Adrian Glaubitz). ++ Closes: #871316. ++ * Drop armel and sparc from the list of alternative zero archs (Adrian ++ Glaubitz). Closes: #871319. ++ * Simplify the zero-sparc patch (Adrian Glaubitz). Closes: #872756. ++ * Remove some obsolete unused patches. Closes: #871606. ++ ++ -- Matthias Klose Thu, 24 Aug 2017 00:42:22 +0200 ++ ++openjdk-9 (9~b181-2) unstable; urgency=medium ++ ++ * Update the jdk-8067331 patch (Adrian Glaubitz). Closes: #871009. ++ * Build zero on armhf again. ++ ++ -- Matthias Klose Mon, 07 Aug 2017 10:28:46 -0400 ++ ++openjdk-9 (9~b181-1) unstable; urgency=medium ++ ++ * OpenJDK 9, b181. ++ * Don't hard-code multiarch-support dependency. Closes: #870521. ++ * Just configure m68k with --with-debug-level=slowdebug. Closes: #868255. ++ * Fix atomic_copy64 on powerpc (Andrew Haley). Closes: #870403. ++ ++ -- Matthias Klose Fri, 04 Aug 2017 07:35:49 -0400 ++ ++openjdk-9 (9~b179-2) unstable; urgency=medium ++ ++ * Really configure --with-debug-level=slowdebug on Zero-only architectures. ++ Closes: #868255. ++ * Really enable the zero-sparc patch (Adrian Glaubitz). Closes: #864359. ++ * Try again building zero on arm64 with slowdebug. ++ ++ -- Matthias Klose Mon, 24 Jul 2017 13:20:10 +0200 ++ ++openjdk-9 (9~b179-1) unstable; urgency=medium ++ ++ * OpenJDK 9, b179. ++ * Drop armel as a hotspot architecture. Closes: #864464. ++ * Configure --with-debug-level=slowdebug on Zero-only architectures. ++ Closes: #868255. ++ * Fix recommendation of microhei/zenhei font packages. Closes: #868205. ++ * Enable the zero-sparc patch (Adrian Glaubitz). Closes: #864359. ++ * Update the disable-doclint patch (Chris West). Closes: #866908. ++ * Disable the jamvm autopkg tests. ++ ++ -- Matthias Klose Sat, 22 Jul 2017 11:49:18 +0200 ++ ++openjdk-9 (9~b177-3) unstable; urgency=medium ++ ++ * Fix applying the s390x-thread-stack-size patch.
++ ++ -- Matthias Klose Fri, 07 Jul 2017 19:18:19 +0200 ++ ++openjdk-9 (9~b177-2) unstable; urgency=medium ++ ++ * Try to build zero again on amd64, arm64, ppc64el and s390x. ++ * Keep the conf/* symlinks in the JAVA_HOME directory. ++ Closes: #866924, #863080. ++ * Drop armel from the list of hotspot architectures. See #864464. ++ * Stop using deprecated GNOME libraries. Closes: #850268. ++ * Apply sparc64 build fixes (Adrian Glaubitz). Closes: #864351. ++ * Update the zero-sparc patch (Adrian Glaubitz). Closes: #864359. ++ * Update the m68k-support patch (Adrian Glaubitz). Closes: #864595. ++ * Disable generation of jvmti.html on m68k (Adrian Glaubitz). ++ Closes: #864596. ++ * Update the zero-sh patch (Adrian Glaubitz). Closes: #864643. ++ * Update the jdk-target-arch-define patch. Closes: #865129. ++ * Provide jvmdir symlink in /usr/lib/debug. Addresses: #867314. ++ * Fix pt_BR translation in awt message. Addresses: #863331. ++ * Drop the 8073754-stack-overflow-9-build patch. Closes: #864288. ++ * Use sigset_t to store the signals used by the JVM (James Cowgill). ++ Closes: #841173. ++ ++ -- Matthias Klose Fri, 07 Jul 2017 12:28:53 +0200 ++ ++openjdk-9 (9~b177-1) unstable; urgency=medium ++ ++ * OpenJDK 9, b177. ++ ++ -- Matthias Klose Fri, 07 Jul 2017 02:18:33 +0000 ++ ++openjdk-9 (9~b170-2) unstable; urgency=medium ++ ++ * OpenJDK 9, b170. ++ ++ -- Matthias Klose Thu, 18 May 2017 11:57:43 -0700 ++ ++openjdk-9 (9~b169-1) unstable; urgency=medium ++ ++ * OpenJDK 9, b169. ++ ++ -- Matthias Klose Sun, 14 May 2017 10:11:24 -0700 ++ ++openjdk-9 (9~b168-1) unstable; urgency=medium ++ ++ * OpenJDK 9, b168. ++ ++ -- Matthias Klose Fri, 05 May 2017 14:33:09 +0200 ++ ++openjdk-9 (9~b164-1) unstable; urgency=medium ++ ++ * OpenJDK 9, b164. ++ ++ -- Matthias Klose Sat, 08 Apr 2017 21:39:41 +0200 ++ ++openjdk-9 (9~b161-1) unstable; urgency=medium ++ ++ * OpenJDK 9, b161. ++ * Don't build the zero JRE on Ubuntu/s390x. ++ * Fix changelog format. ++ ++ -- Matthias Klose Fri, 17 Mar 2017 00:09:34 +0100 ++ ++openjdk-9 (9~b159-1) unstable; urgency=medium ++ ++ * OpenJDK 9, b159. ++ ++ -- Matthias Klose Fri, 03 Mar 2017 19:03:10 +0100 ++ ++openjdk-9 (9~b158-1) unstable; urgency=medium ++ ++ * OpenJDK 9, b158. ++ * Add OpenJDK Stack Unwinder and Frame Decorator for gdb. ++ ++ -- Matthias Klose Sun, 26 Feb 2017 18:57:42 +0100 ++ ++openjdk-9 (9~b155-1) unstable; urgency=medium ++ ++ * OpenJDK 9, b155. ++ ++ -- Matthias Klose Fri, 03 Feb 2017 07:56:11 +0100 ++ ++openjdk-9 (9~b154-1) unstable; urgency=medium ++ ++ * OpenJDK 9, b154. ++ * Fix libjpeg dependency. Closes: #852420. ++ ++ -- Matthias Klose Thu, 26 Jan 2017 23:33:55 +0100 ++ ++openjdk-9 (9~b153-2) unstable; urgency=medium ++ ++ * Lower CPU requirements for Debian/s390x. ++ ++ -- Matthias Klose Sat, 21 Jan 2017 21:52:52 +0100 ++ ++openjdk-9 (9~b153-1) unstable; urgency=medium ++ ++ * OpenJDK 9, b153. ++ ++ -- Matthias Klose Fri, 20 Jan 2017 14:39:18 +0100 ++ ++openjdk-9 (9~b151-2) unstable; urgency=medium ++ ++ * Fix builds with zero and jamvm as alternate VMs. ++ ++ -- Matthias Klose Tue, 10 Jan 2017 15:40:20 +0100 ++ ++openjdk-9 (9~b151-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b151. ++ ++ -- Matthias Klose Sun, 08 Jan 2017 23:16:21 +0100 ++ ++openjdk-9 (9~b149-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b149. ++ ++ -- Matthias Klose Sat, 17 Dec 2016 10:58:22 +0100 ++ ++openjdk-9 (9~b148-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b148.
++ ++ -- Matthias Klose Mon, 12 Dec 2016 14:04:02 +0000 ++ ++openjdk-9 (9~b147-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b147. ++ * Use sigset_t to store the signals used by the JVM (James Cowgill). ++ Addresses: #841173. ++ ++ -- Matthias Klose Sat, 03 Dec 2016 15:41:51 +0100 ++ ++openjdk-9 (9~b144-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b144. ++ ++ -- Matthias Klose Fri, 11 Nov 2016 14:43:48 +0100 ++ ++openjdk-9 (9~b143-2) experimental; urgency=medium ++ ++ * Fix 8168567, unbreaking the build on arm64. ++ ++ -- Matthias Klose Mon, 07 Nov 2016 15:46:43 +0100 ++ ++openjdk-9 (9~b143-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b143. ++ ++ -- Matthias Klose Sun, 06 Nov 2016 22:36:28 +0100 ++ ++openjdk-9 (9~b142-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b142. ++ ++ -- Matthias Klose Sat, 29 Oct 2016 12:03:19 +0200 ++ ++openjdk-9 (9~b140-2) experimental; urgency=medium ++ ++ * Don't build zero on ppc64 and ppc64el (fails to build). ++ ++ -- Matthias Klose Wed, 19 Oct 2016 17:02:37 +0200 ++ ++openjdk-9 (9~b140-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b140. ++ ++ -- Matthias Klose Mon, 17 Oct 2016 12:01:47 +0200 ++ ++openjdk-9 (9~b139-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b139. ++ * Stop building zero on amd64, fails to build with hardening defaults. ++ ++ -- Matthias Klose Sun, 09 Oct 2016 10:03:18 +0200 ++ ++openjdk-9 (9~b136-1ubuntu1) yakkety; urgency=medium ++ ++ * 8165323: (fs) Files.getFileStore fails with "Mount point not found" in ++ chroot environment. ++ ++ -- Matthias Klose Wed, 21 Sep 2016 16:20:45 +0200 ++ ++openjdk-9 (9~b136-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b136. ++ ++ -- Matthias Klose Mon, 19 Sep 2016 20:33:07 +0200 ++ ++openjdk-9 (9~b135-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b135. ++ ++ -- Matthias Klose Sat, 10 Sep 2016 03:56:51 +0200 ++ ++openjdk-9 (9~b134-2ubuntu1) yakkety; urgency=medium ++ ++ * Explicitly turn off stack-protector for zero builds. ++ ++ -- Matthias Klose Thu, 08 Sep 2016 17:21:13 +0200 ++ ++openjdk-9 (9~b134-2) experimental; urgency=medium ++ ++ * Build using GCC 6. ++ * Enable zero build on arm64 and ppc64el. ++ * Increase thread stack size for ppc* zero builds. ++ * Don't use Solaris compiler flags on sparc64. ++ * Port x32 zero patch. ++ * Don't use the just-built jdk for zero builds. ++ * Don't use getFileStore() during the build; chroots throw an ++ exception while running jlink. ++ * Fix removal of jre-headless alternatives. Closes: #788445. ++ ++ -- Matthias Klose Mon, 05 Sep 2016 14:14:01 +0200 ++ ++openjdk-9 (9~b133-2) experimental; urgency=medium ++ ++ * Don't do bootcycle builds for zero builds. ++ * Fix macro settings for zero builds. ++ ++ -- Matthias Klose Wed, 31 Aug 2016 18:34:51 +0200 ++ ++openjdk-9 (9~b133-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b133. ++ ++ -- Matthias Klose Mon, 29 Aug 2016 14:25:48 +0200 ++ ++openjdk-9 (9~b130-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b130. ++ ++ -- Matthias Klose Sun, 07 Aug 2016 23:27:51 +0200 ++ ++openjdk-9 (9~b124-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b124. ++ * Fix an issue with libatk-wrapper (Samuel Thibault). Closes: #827796. ++ ++ -- Matthias Klose Sat, 28 May 2016 22:30:12 +0200 ++ ++openjdk-9 (9~b117-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b117. ++ * Fix zero builds, next try. ++ ++ -- Matthias Klose Thu, 05 May 2016 23:10:57 +0200 ++ ++openjdk-9 (9~b116-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b116.
++ ++ -- Matthias Klose Fri, 29 Apr 2016 00:41:04 +0200 ++ ++openjdk-9 (9~b115-2) experimental; urgency=medium ++ ++ * Fix zero builds: ++ - Backport 8132051, 8146518, 8150654, 8154210 from the hs repo. ++ - Apply proposed patch for 8153275. ++ * Set initial VMThreadStackSize to 1600 on s390x. ++ * Tighten openjdk build dependency on s390x. ++ ++ -- Matthias Klose Sat, 23 Apr 2016 23:09:19 +0200 ++ ++openjdk-9 (9~b115-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b115. ++ * Fix header file conflict. Closes: #816440. LP: #1550950. ++ ++ -- Matthias Klose Thu, 21 Apr 2016 23:48:04 +0200 ++ ++openjdk-9 (9~b114-0ubuntu1) xenial; urgency=medium ++ ++ * OpenJDK 9, b114. ++ ++ -- Matthias Klose Thu, 14 Apr 2016 21:02:34 +0200 ++ ++openjdk-9 (9~b113-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b113. ++ ++ -- Matthias Klose Wed, 13 Apr 2016 20:58:18 +0200 ++ ++openjdk-9 (9~b112-3) experimental; urgency=medium ++ ++ * Fix zero builds, apply proposed patch for JDK-8153275. ++ ++ -- Matthias Klose Fri, 01 Apr 2016 18:39:09 +0200 ++ ++openjdk-9 (9~b112-2) experimental; urgency=medium ++ ++ * OpenJDK 9, b112. ++ ++ -- Matthias Klose Thu, 31 Mar 2016 15:44:43 +0200 ++ ++openjdk-9 (9~b107-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b107. ++ * Build-depend on openjdk-9-jdk-headless. ++ * Build with GCC 5 on mips*. ++ * Split out an openjdk-9-jdk-headless package. ++ * Update libgconf/libgnome jre recommendations. Addresses: #813943. ++ * Update package reference in README. Addresses: #814605. ++ * Add French translation for the policytool desktop file. Addresses: #813851. ++ * Install app icons again. ++ ++ -- Matthias Klose Sat, 27 Feb 2016 11:55:04 +0100 ++ ++openjdk-9 (9~b102-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b102. ++ ++ -- Matthias Klose Tue, 26 Jan 2016 13:33:16 +0100 ++ ++openjdk-9 (9~b101-2ubuntu2) xenial; urgency=medium ++ ++ * Don't build zero on arm64. ++ ++ -- Matthias Klose Sun, 17 Jan 2016 11:17:19 +0100 ++ ++openjdk-9 (9~b101-2ubuntu1) xenial; urgency=medium ++ ++ * Ignore installing jsadebugd.1. ++ ++ -- Matthias Klose Sun, 17 Jan 2016 10:33:06 +0100 ++ ++openjdk-9 (9~b101-2) experimental; urgency=medium ++ ++ * Disable the zero build on ppc64el, currently fails to build. ++ ++ -- Matthias Klose Sat, 16 Jan 2016 10:30:18 +0100 ++ ++openjdk-9 (9~b101-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b101. ++ * Fix cross builds. ++ * Build again using GCC 4.9 on mips*, fails to build with GCC 5. ++ ++ -- Matthias Klose Fri, 15 Jan 2016 12:25:14 +0100 ++ ++openjdk-9 (9~b96-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b96. ++ * openjdk-9-jdk: Fix typo in sdk provides. Addresses: #803150. ++ * Build using giflib 5. ++ * Build using GCC 5 everywhere. ++ ++ -- Matthias Klose Sun, 13 Dec 2015 17:45:17 +0100 ++ ++openjdk-9 (9~b94-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b94. ++ * Update configury for sparc64 (Steven Chamberlain). Addresses: #806202. ++ * Fix stripping packages (use bash instead of expr substring). ++ ++ -- Matthias Klose Tue, 01 Dec 2015 11:05:27 +0100 ++ ++openjdk-9 (9~b88-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b88. ++ ++ -- Matthias Klose Sun, 25 Oct 2015 02:38:14 +0200 ++ ++openjdk-9 (9~b87-2) wily; urgency=medium ++ ++ * Prefer openjdk-8-jdk for the build. ++ ++ -- Matthias Klose Tue, 20 Oct 2015 14:21:25 +0200 ++ ++openjdk-9 (9~b87-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b87. ++ * Build the client hotspot on i386.
++ ++ -- Matthias Klose Mon, 19 Oct 2015 17:58:08 +0200 ++ ++openjdk-9 (9~b80-2) experimental; urgency=medium ++ ++ * Fix build for armel, armhf and "unknown" zero architectures. ++ * Fix build error on AArch64. ++ ++ -- Matthias Klose Sat, 05 Sep 2015 20:16:45 +0200 ++ ++openjdk-9 (9~b80-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b80. ++ * Fix installing the openjdk.desktop file when cautious-launcher is available. ++ * Define _alpha_ / _sh_ preprocessor macros instead of alpha / sh. ++ * Re-enable the atk bridge for releases with a fixed atk bridge. ++ * Make derivative builds the same as the parent distro. ++ * Add m68k support for Zero (Andreas Schwab). ++ ++ -- Matthias Klose Fri, 04 Sep 2015 19:27:56 +0200 ++ ++openjdk-9 (9~b74-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b74. ++ * Fix jdk build on x32. ++ * JDK-8073754, increase stack size limits on powerpc and ppc64. ++ * Configure --with-boot-jdk-jvmargs="-XX:ThreadStackSize=2240" on ++ powerpc and ppc64. ++ ++ -- Matthias Klose Mon, 27 Jul 2015 20:43:48 +0200 ++ ++openjdk-9 (9~b71-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b71. ++ * openjdk-9-jre: Recommend the real libgconf2-4 and libgnome2-0 packages. ++ Addresses: #786594. ++ ++ -- Matthias Klose Mon, 06 Jul 2015 17:29:39 +0200 ++ ++openjdk-9 (9~b68-4) experimental; urgency=medium ++ ++ * Fix 32-bit detection for the build jdk; try to build again for mips ++ and mipsel (James Cowgill). ++ * openjdk-jre-headless: Add dependency on the package containing the ++ mountpoint binary. ++ ++ -- Matthias Klose Mon, 22 Jun 2015 22:09:17 +0200 ++ ++openjdk-9 (9~b68-3) experimental; urgency=medium ++ ++ * Enable bootcycle zero builds everywhere. ++ * Re-enable running the testsuite. ++ ++ -- Matthias Klose Thu, 18 Jun 2015 23:55:33 +0200 ++ ++openjdk-9 (9~b68-2) experimental; urgency=medium ++ ++ * Fix 8080684, PPC64 little-endian build. ++ * Fix installation of zero based builds. ++ ++ -- Matthias Klose Wed, 17 Jun 2015 19:03:31 +0200 ++ ++openjdk-9 (9~b68-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b68. ++ * Fix the build on AArch64 (Ed Nevill). ++ ++ -- Matthias Klose Tue, 16 Jun 2015 11:39:02 +0200 ++ ++openjdk-9 (9~b64-1) experimental; urgency=medium ++ ++ * OpenJDK 9, b64. ++ * Fix build error in zero. ++ ++ -- Matthias Klose Mon, 18 May 2015 17:30:20 +0200 ++ ++openjdk-8 (8u45-b14-2) unstable; urgency=medium ++ ++ * Fix JamVM with 8u45. Closes: #766284. ++ ++ -- Matthias Klose Sun, 10 May 2015 19:28:41 +0200 ++ ++openjdk-8 (8u45-b14-1) unstable; urgency=medium ++ ++ * Update to 8u45-b14. ++ * Update AArch64 to (post) 8u45-b14. ++ * Make libnss3-dev installable on precise (Thorsten Glaser). LP: #1411630. ++ * Only install the openjdk-java.desktop file when using cautious-launcher. ++ ++ -- Matthias Klose Sun, 19 Apr 2015 16:31:44 +0200 ++ ++openjdk-8 (8u40-b27-1) unstable; urgency=medium ++ ++ * Update to 8u40-b27. ++ * Update AArch64 to (post) 8u40-b25. ++ * Fix libjavajpeg build using the system jpeg library. Closes: #760926. ++ ++ -- Matthias Klose Tue, 17 Mar 2015 01:19:45 +0100 ++ ++openjdk-8 (8u40~b22-1ubuntu1) vivid; urgency=medium ++ ++ * Update AArch64 to 8u40-b22. ++ * Update the alpha float patch. ++ * Fix JDK-8067330, ZERO_ARCHDEF incorrectly defined for PPC/PPC64 ++ architectures. ++ * Fix JDK-8067331, Zero: Atomic::xchg and Atomic::xchg_ptr need ++ full memory barrier. ++ * Build using OpenJDK-8. ++ ++ -- Matthias Klose Tue, 27 Jan 2015 14:59:00 +0100 ++ ++openjdk-8 (8u40~b22-1) unstable; urgency=medium ++ ++ * Update to 8u40-b22.
++ * Fix build on mips64 and mips64el. Closes: #776295. ++ * Don't strip libjvm.so to prevent rejection by ftp-master (a workaround; ++ there is no fix in the archive). Addresses: #775760. ++ * Fix jamvm to work with recent security updates. Closes: #766284. ++ ++ -- Matthias Klose Mon, 26 Jan 2015 16:59:37 +0100 ++ ++openjdk-8 (8u40~b21-1) unstable; urgency=medium ++ ++ * Update to 8u40-b21. ++ ++ -- Matthias Klose Thu, 15 Jan 2015 12:14:18 +0100 ++ ++openjdk-8 (8u40~b10-1) unstable; urgency=medium ++ ++ * Fix libjpeg runtime dependency. ++ ++ -- Matthias Klose Thu, 16 Oct 2014 08:38:13 +0200 ++ ++openjdk-8 (8u40~b09-1) unstable; urgency=medium ++ ++ * Update to 8u40-b09. ++ * Update the AArch64 hotspot to 8u40-b09. ++ * Allow building for Ubuntu 12.04 LTS. ++ * Change B-D to libjpeg-dev to finish the transition to libjpeg-turbo ++ (Ondřej Surý). Closes: #763490. ++ * Backport the fix for 8017773: OpenJDK returns incorrect TrueType ++ font metrics. Closes: #762323. ++ * Depend on libnss3 instead of libnss3-1d for recent releases. ++ Addresses: #760122. ++ ++ -- Matthias Klose Tue, 14 Oct 2014 12:39:18 +0200 ++ ++openjdk-8 (8u40~b04-2) unstable; urgency=medium ++ ++ * Remove AArch64 patch applied upstream. ++ * Update the kfreebsd jdk patch, still not forwarded upstream. ++ ++ -- Matthias Klose Tue, 09 Sep 2014 17:08:32 +0200 ++ ++openjdk-8 (8u40~b04-1) experimental; urgency=medium ++ ++ * Update to 8u40-b04. ++ * Backport 8050942, implement template interpreter for ppc64le. ++ * Build-depend on systemtap-sdt-dev. ++ ++ -- Matthias Klose Wed, 03 Sep 2014 21:11:27 +0200 ++ ++openjdk-8 (8u20-b26-1) experimental; urgency=medium ++ ++ * 8u20 build 26 is the final 8u20 release. ++ * Update the AArch64 hotspot. ++ * Fix applying the kfreebsd patch for JamVM. ++ * x32 build fixes. ++ * Allow openjdk-8-jdk as an alternative build dependency. ++ * Adjust timeouts for jtreg runs. ++ ++ -- Matthias Klose Wed, 20 Aug 2014 10:34:55 +0200 ++ ++openjdk-8 (8u20~b26-1) experimental; urgency=medium ++ ++ * Update to 8u20-b26. ++ * Update to JamVM 2.0.0. ++ * Update to IcedTea-Sound 1.0.1. ++ * Update toplevel configury to recognize zero archs alpha, mips*, ++ m68k, sh4. ++ * Update kfreebsd-support patches (Steven Chamberlain). ++ * Fix an uninitialized memory issue in adlc (Fridrich Strba). ++ * Move libjavagtk into the -jre package. ++ * Use the system libpcsclite library. ++ * Fix typo, ignoring boot cycle builds (Emmanuel Bourg). ++ * Derive the update version and the build number from the package ++ version (Emmanuel Bourg). ++ * Call quilt with --quiltrc -. Closes: #755710. ++ * openjdk-8-jdk: Fix src.zip symlink. Closes: #755869. ++ ++ -- Matthias Klose Thu, 31 Jul 2014 19:51:35 +0200 ++ ++openjdk-8 (8u20~b20-2) experimental; urgency=medium ++ ++ * Work around OpenJDK's build system, which is not robust enough ++ to accept commas in *FLAGS. ++ * Pass extra flags for non-hotspot builds. ++ * Fix the zero build on i386. ++ * Don't add extra symlinks for the jni_{md,jawt}.h header files. ++ ++ -- Matthias Klose Fri, 11 Jul 2014 20:30:54 +0200 ++ ++openjdk-8 (8u20~b20-1) experimental; urgency=medium ++ ++ * Initial OpenJDK 8 packaging, based on 8u20-b20. ++ * Fix hotspot build system for GNU make 4.0 (Emmanuel Bourg). ++ * Drop rhino (build) dependencies (Emmanuel Bourg). ++ * Add java8 provides (Emmanuel Bourg). ++ * Add IcedTea patches to build with external jpeg, png and lcms ++ libraries (Emmanuel Bourg). ++ * Add keywords to the desktop files (Emmanuel Bourg).
++ * Remove the suggested dependency on sun-java6-fonts (Emmanuel Bourg). ++ * Build hotspot on ppc64 and ppc64el. ++ * Add the IcedTea Sound tarball. ++ * Don't strip files when building the images. ++ * Update patches to pass the extra flags to the libjsig and libsaproc builds. ++ * Use dh_strip's knowledge about build ids when available. ++ ++ -- Matthias Klose Wed, 09 Jul 2014 20:11:18 +0200 ++ ++openjdk-7 (7u55-2.4.7-2) unstable; urgency=medium ++ ++ * Fix the quoting of configure flags for the zero build. ++ * Update the java-access-bridge-security patch (Raphael Geissert). ++ * Don't hard-code the compiler names in the AArch64 hotspot build. ++ * Build using GCC 4.9 where available. ++ * Add MIPS64(el) support (Yunqiang Su). Closes: #746207. ++ * Suggest fonts-indic instead of ttf-indic-fonts. Closes: #747694. ++ ++ -- Matthias Klose Fri, 16 May 2014 19:12:42 +0200 ++ ++openjdk-7 (7u55-2.4.7-1) unstable; urgency=high ++ ++ * IcedTea7 2.4.7 release. ++ * Security fixes: ++ - S8023046: Enhance splashscreen support. ++ - S8025005: Enhance CORBA initializations. ++ - S8025010, CVE-2014-2412: Enhance AWT contexts. ++ - S8025030, CVE-2014-2414: Enhance stream handling. ++ - S8025152, CVE-2014-0458: Enhance activation set up. ++ - S8026067: Enhance signed jar verification. ++ - S8026163, CVE-2014-2427: Enhance media provisioning. ++ - S8026188, CVE-2014-2423: Enhance envelope factory. ++ - S8026200: Enhance RowSet Factory. ++ - S8026716, CVE-2014-2402: (aio) Enhance asynchronous channel handling. ++ - S8026736, CVE-2014-2398: Enhance Javadoc pages. ++ - S8026797, CVE-2014-0451: Enhance data transfers. ++ - S8026801, CVE-2014-0452: Enhance endpoint addressing. ++ - S8027766, CVE-2014-0453: Enhance RSA processing. ++ - S8027775: Enhance ICU code. ++ - S8027841, CVE-2014-0429: Enhance pixel manipulations. ++ - S8028385: Enhance RowSet Factory. ++ - S8029282, CVE-2014-2403: Enhance CharInfo set up. ++ - S8029286: Enhance subject delegation. ++ - S8029699: Update Poller demo. ++ - S8029730: Improve audio device additions. ++ - S8029735: Enhance service mgmt natives. ++ - S8029740, CVE-2014-0446: Enhance handling of loggers. ++ - S8029745, CVE-2014-0454: Enhance algorithm checking. ++ - S8029750: Enhance LCMS color processing (LCMS 2 only). ++ - S8029760, CVE-2013-6629: Enhance AWT image libraries (in-tree libjpeg). ++ - S8029844, CVE-2014-0455: Enhance argument validation. ++ - S8029854, CVE-2014-2421: Enhance JPEG decodings. ++ - S8029858, CVE-2014-0456: Enhance array copies. ++ - S8030731, CVE-2014-0460: Improve name service robustness. ++ - S8031330: Refactor ObjectFactory. ++ - S8031335, CVE-2014-0459: Better color profiling. ++ - S8031352, CVE-2013-6954: Enhance PNG handling (in-tree libpng). ++ - S8031394, CVE-2014-0457: (sl) Fix exception handling in ServiceLoader. ++ - S8031395: Enhance LDAP processing. ++ - S8032686, CVE-2014-2413: Issues with method invoke. ++ - S8033618, CVE-2014-1876: Correct logging output. ++ - S8034926, CVE-2014-2397: Attribute classes properly. ++ - S8036794, CVE-2014-0461: Manage JavaScript instances. ++ * AArch64 fixes. ++ ++ -- Matthias Klose Wed, 16 Apr 2014 15:37:40 +0200 ++ ++openjdk-7 (7u51-2.4.6-1) unstable; urgency=medium ++ ++ * IcedTea7 2.4.6 release. ++ * Explicitly use AC_MAINTAINER_MODE and automake-1.11 to create the ++ debian .orig tarball. Addresses: #740289. ++ * Apply patch from upstream to fix bold fonts in Swing applications using ++ GTK L&F (Ryan Tandy). LP: #937200. ++ * Explicitly build-depend on libkrb5-dev.
++ * On AArch64, don't use the hotspot backport for the zero build. ++ ++ -- Matthias Klose Tue, 01 Apr 2014 09:25:19 +0200 ++ ++openjdk-7 (7u51-2.4.6~pre1-1) unstable; urgency=medium ++ ++ * IcedTea7 2.4.6 prerelease. ++ * Fix icedtea-web build failure on kfreebsd-* (unable to find ++ sun.security.util.SecurityConstants) (Steven Chamberlain). Closes: #739032. ++ * Update the AArch64 Hotspot. ++ ++ -- Matthias Klose Thu, 27 Mar 2014 17:24:45 +0100 ++ ++openjdk-7 (7u51-2.4.5-2) unstable; urgency=medium ++ ++ * Update the KFreeBSD patch (Steven Chamberlain). Closes: #736291. ++ ++ -- Matthias Klose Tue, 04 Feb 2014 13:28:10 +0100 ++ ++openjdk-7 (7u51-2.4.5-1) unstable; urgency=medium ++ ++ * IcedTea7 2.4.5 release. ++ * Build Hotspot client and server vms for AArch64. ++ ++ -- Matthias Klose Fri, 31 Jan 2014 06:13:20 -0500 ++ ++openjdk-7 (7u51-2.4.4-1) unstable; urgency=medium ++ ++ * IcedTea7 2.4.4 release. ++ * Security fixes: ++ - S6727821: Enhance JAAS Configuration. ++ - S7068126, CVE-2014-0373: Enhance SNMP statuses. ++ - S8010935: Better XML handling. ++ - S8011786, CVE-2014-0368: Better applet networking. ++ - S8021257, S8025022, CVE-2013-5896: com.sun.corba.se.** should be ++ on the restricted package list. ++ - S8021271, S8021266, CVE-2014-0408: Better buffering in ObjC code. ++ - S8022904: Enhance JDBC Parsers. ++ - S8022927: Input validation for byte/endian conversions. ++ - S8022935: Enhance Apache resolver classes. ++ - S8022945: Enhance JNDI implementation classes. ++ - S8023057: Enhance start up image display. ++ - S8023069, CVE-2014-0411: Enhance TLS connections. ++ - S8023245, CVE-2014-0423: Enhance Beans decoding. ++ - S8023301: Enhance generic classes. ++ - S8023338: Update jarsigner to encourage timestamping. ++ - S8023672: Enhance jar file validation. ++ - S8024302: Clarify jar verifications. ++ - S8024306, CVE-2014-0416: Enhance Subject consistency. ++ - S8024530: Enhance font process resilience. ++ - S8024867: Enhance logging start up. ++ - S8025014: Enhance Security Policy. ++ - S8025018, CVE-2014-0376: Enhance JAX-P set up. ++ - S8025026, CVE-2013-5878: Enhance canonicalization. ++ - S8025034, CVE-2013-5907: Improve layout lookups. ++ - S8025448: Enhance listening events. ++ - S8025758, CVE-2014-0422: Enhance Naming management. ++ - S8025767, CVE-2014-0428: Enhance IIOP Streams. ++ - S8026172: Enhance UI Management. ++ - S8026176: Enhance document printing. ++ - S8026193, CVE-2013-5884: Enhance CORBA stub factories. ++ - S8026204: Enhance auth login contexts. ++ - S8026417, CVE-2013-5910: Enhance XML canonicalization. ++ - S8026502: java/lang/invoke/MethodHandleConstants.java fails on all ++ platforms. ++ - S8027201, CVE-2014-0376: Enhance JAX-P set up. ++ - S8029507, CVE-2013-5893: Enhance JVM method processing. ++ - S8029533: REGRESSION: closed/java/lang/invoke/8008140/Test8008140.java ++ fails again. ++ * Remove alpha from stage1_gcj_archs. ++ * Use the langtools and jdk tarballs as provided by IcedTea. ++ * Hotspot is dead on sparc. Build the zero interpreter as the default. ++ * Blindly update the KF***BSD patches. ++ ++ -- Matthias Klose Wed, 15 Jan 2014 10:34:34 +0100 ++ ++openjdk-7 (7u45-2.4.3-5) unstable; urgency=medium ++ ++ * Run the jtreg tests on powerpcspe, tested by Roland Stigge. ++ * Fix zero builds on 64k page kernel configs. ++ * Fix more IcedTea bits to build on x32. ++ ++ -- Matthias Klose Sat, 11 Jan 2014 13:55:34 +0100 ++ ++openjdk-7 (7u45-2.4.3-4) unstable; urgency=low ++ ++ * Re-enable running the testsuite on powerpc.
++ * Run the testsuite on AArch64. ++ * Fix IcedTea bits to build on x32. ++ ++ -- Matthias Klose Sun, 22 Dec 2013 21:20:10 +0100 ++ ++openjdk-7 (7u45-2.4.3-3) unstable; urgency=low ++ ++ * Don't build on s390 anymore. ++ * Update hotspot-mips-align patch (Aurelien Jarno). Closes: #732528. ++ * Build for ppc64el. ++ * Try to build zero on x32. ++ * Configure with --enable-zero on sparc and sparc64. ++ ++ -- Matthias Klose Fri, 20 Dec 2013 14:42:38 +0100 ++ ++openjdk-7 (7u45-2.4.3-2.3) unstable; urgency=medium ++ ++ * Disable bootstrap build on alpha. Closes: #719671. ++ * Disable running the jdk jtreg tests on the hotspot architectures. ++ Hanging on the buildds. ++ * Re-enable the jexec patch, program logic confused by running jexec ++ outside the assumed java home. Closes: #731961. ++ * Don't apply the s390 patches on s390x. s390 is successfully dead. ++ * Fix zero builds on little-endian architectures, taken from the trunk. ++ ++ -- Matthias Klose Thu, 12 Dec 2013 18:24:44 +0100 ++ ++openjdk-7 (7u45-2.4.3-1) unstable; urgency=medium ++ ++ * IcedTea7 2.4.3 release. ++ * Security fixes: ++ - S8006900, CVE-2013-3829: Add new date/time capability. ++ - S8008589: Better MBean permission validation. ++ - S8011071, CVE-2013-5780: Better crypto provider handling. ++ - S8011081, CVE-2013-5772: Improve jhat. ++ - S8011157, CVE-2013-5814: Improve CORBA portability. ++ - S8012071, CVE-2013-5790: Better Building of Beans. ++ - S8012147: Improve tool support. ++ - S8012277, CVE-2013-5849: Improve AWT DataFlavor. ++ - S8012425, CVE-2013-5802: Transform TransformerFactory. ++ - S8013503, CVE-2013-5851: Improve stream factories. ++ - S8013506: Better Pack200 data handling. ++ - S8013510, CVE-2013-5809: Augment image writing code. ++ - S8013514: Improve stability of cmap class. ++ - S8013739, CVE-2013-5817: Better LDAP resource management. ++ - S8013744, CVE-2013-5783: Better tabling for AWT. ++ - S8014085: Better serialization support in JMX classes. ++ - S8014093, CVE-2013-5782: Improve parsing of images. ++ - S8014098: Better profile validation. ++ - S8014102, CVE-2013-5778: Improve image conversion. ++ - S8014341, CVE-2013-5803: Better service from Kerberos servers. ++ - S8014349, CVE-2013-5840: (cl) Class.getDeclaredClass problematic ++ in some class loader configurations. ++ - S8014530, CVE-2013-5825: Better digital signature processing. ++ - S8014534: Better profiling support. ++ - S8014987, CVE-2013-5842: Augment serialization handling. ++ - S8015614: Update build settings. ++ - S8015731: Subject java.security.auth.subject to improvements. ++ - S8015743, CVE-2013-5774: Address internet addresses. ++ - S8016256: Make finalization final. ++ - S8016653, CVE-2013-5804: javadoc should ignore ignoreable characters ++ in names. ++ - S8016675, CVE-2013-5797: Make Javadoc pages more robust. ++ - S8017196, CVE-2013-5850: Ensure Proxies are handled appropriately. ++ - S8017287, CVE-2013-5829: Better resource disposal. ++ - S8017291, CVE-2013-5830: Cast Proxies Aside. ++ - S8017298, CVE-2013-4002: Better XML support. ++ - S8017300, CVE-2013-5784: Improve Interface Implementation. ++ - S8017505, CVE-2013-5820: Better Client Service. ++ - S8019292: Better Attribute Value Exceptions. ++ - S8019617: Better view of objects. ++ - S8020293: JVM crash. ++ - S8021275, CVE-2013-5805: Better screening for ScreenMenu. ++ - S8021282, CVE-2013-5806: Better recycling of object instances. ++ - S8021286: Improve MacOS resourcing. ++ - S8021290, CVE-2013-5823: Better signature validation.
++ - S8022931, CVE-2013-5800: Enhance Kerberos exceptions. ++ - S8022940: Enhance CORBA translations. ++ - S8023683: Enhance class file parsing. ++ * Fix build failure on mips* (Aurelien Jarno). Closes: #729448. ++ * Run autoreconf. Closes: #724083. ++ * Merge the -jre-lib package into -jre-headless. Simplifies the packaging, ++ and the savings were not as big as hoped, because the rt.jar is still ++ architecture dependent. Closes: #641049, #722510. ++ ++ -- Matthias Klose Sun, 08 Dec 2013 23:22:06 +0100 ++ ++openjdk-7 (7u25-2.3.12-4ubuntu3) saucy; urgency=low ++ ++ * Apply missing patch to fix arm64/AArch64 detection. ++ ++ -- Matthias Klose Fri, 11 Oct 2013 17:51:33 +0200 ++ ++openjdk-7 (7u25-2.3.12-4ubuntu2) saucy; urgency=low ++ ++ * openjdk-jre-headless: Loosen the dependency on -jre-lib. ++ ++ -- Matthias Klose Wed, 09 Oct 2013 16:29:15 +0200 ++ ++openjdk-7 (7u25-2.3.12-4ubuntu1) saucy; urgency=low ++ ++ * Regenerate the control file. ++ ++ -- Matthias Klose Fri, 16 Aug 2013 12:09:47 +0200 ++ ++openjdk-7 (7u25-2.3.12-4) unstable; urgency=low ++ ++ * Add the hotspot patches for AArch64, which apparently were not ++ included in the IcedTea release by intent. ++ * Don't interpret arm64 as an ARM architecture, but as AArch64. So ++ much for Debian calling this port arm64 ... ++ * Use host macros instead of build macros for corba and hotspot config. ++ * Re-add multiarch library directories to the default library path. ++ Closes: #712567. ++ * Enable the two-stage build on alpha. Closes: #719671. ++ * Build for powerpcspe (Roland Stigge). Closes: #712686. ++ * Recommend fonts-dejavu-extra instead of ttf-dejavu-extra for current ++ releases. Closes: #718839. ++ ++ -- Matthias Klose Thu, 15 Aug 2013 21:26:51 +0200 ++ ++openjdk-7 (7u25-2.3.12-3) unstable; urgency=low ++ ++ * Fix kFreeBSD builds (thanks to Christoph Egger for his help). ++ ++ -- Damien Raude-Morvan Mon, 12 Aug 2013 00:39:41 +0200 ++ ++openjdk-7 (7u25-2.3.12-2) unstable; urgency=low ++ ++ [ Matthias Klose ] ++ * Regenerate the hotspot-s390 patch. ++ ++ [ Damien Raude-Morvan ] ++ * Update kfreebsd patches. ++ ++ -- Matthias Klose Sat, 03 Aug 2013 20:22:41 +0200 ++ ++openjdk-7 (7u25-2.3.12-1) unstable; urgency=low ++ ++ * IcedTea7 2.3.12 release. ++ * Don't build with pulseaudio on arm64. ++ * Disable bootstrapped build on s390 and sparc. ++ ++ -- Matthias Klose Fri, 02 Aug 2013 15:55:01 +0200 ++ ++openjdk-7 (7u25-2.3.10-2ubuntu1) saucy; urgency=low ++ ++ * Regenerate the control file. ++ ++ -- Matthias Klose Mon, 15 Jul 2013 23:59:45 +0200 ++ ++openjdk-7 (7u25-2.3.10-2) unstable; urgency=low ++ ++ [ Matthias Klose ] ++ * Fix gcj-jdk build dependency on ia64 and s390. ++ * Build zero on arm64. ++ ++ [ Gianfranco Costamagna ] ++ * Fix build failure on kfreebsd (Closes: #714528). ++ ++ -- Matthias Klose Sun, 30 Jun 2013 17:12:28 +0200 ++ ++openjdk-7 (7u25-2.3.10-1) unstable; urgency=high ++ ++ * IcedTea7 2.3.10 release. ++ * Security fixes: ++ * S6741606, CVE-2013-2407: Integrate Apache Santuario. ++ * S7158805, CVE-2013-2445: Better rewriting of nested subroutine calls. ++ * S7170730, CVE-2013-2451: Improve Windows network stack support. ++ * S8000638, CVE-2013-2450: Improve deserialization. ++ * S8000642, CVE-2013-2446: Better handling of objects for transportation. ++ * S8001032: Restrict object access. ++ * S8001033, CVE-2013-2452: Refactor network address handling in virtual ++ machine identifiers. ++ * S8001034, CVE-2013-1500: Memory management improvements. ++ * S8001038, CVE-2013-2444: Resourcefully handle resources.
++ * S8001043: Clarify definition restrictions. ++ * S8001308: Update display of applet windows. ++ * S8001309: Better handling of annotation interfaces. ++ * S8001318, CVE-2013-2447: Socket.getLocalAddress not consistent with ++ InetAddress.getLocalHost. ++ * S8001330, CVE-2013-2443: Improve on checking order (non-Zero builds only). ++ * S8003703, CVE-2013-2412: Update RMI connection dialog box. ++ * S8004288, CVE-2013-2449: (fs) Files.probeContentType problems. ++ * S8004584: Augment applet contextualization. ++ * S8005007: Better glyph processing. ++ * S8006328, CVE-2013-2448: Improve robustness of sound classes. ++ * S8006611: Improve scripting. ++ * S8007467: Improve robustness of JMX internal APIs. ++ * S8007471: Improve MBean notifications. ++ * S8007812, CVE-2013-2455: (reflect) Class.getEnclosingMethod problematic for some classes. ++ * S8007925: Improve cmsStageAllocLabV2ToV4curves. ++ * S8007926: Improve cmsPipelineDup. ++ * S8007927: Improve cmsAllocProfileSequenceDescription. ++ * S8007929: Improve CurvesAlloc. ++ * S8008120, CVE-2013-2457: Improve JMX class checking. ++ * S8008124, CVE-2013-2453: Better compliance testing. ++ * S8008128: Better API coherence for JMX. ++ * S8008132, CVE-2013-2456: Better serialization support. ++ * S8008585: Better JMX data handling. ++ * S8008593: Better URLClassLoader resource management. ++ * S8008603: Improve provision of JMX providers. ++ * S8008607: Better input checking in JMX. ++ * S8008611: Better handling of annotations in JMX. ++ * S8008615: Improve robustness of JMX internal APIs. ++ * S8008623: Better handling of MBeanServers. ++ * S8008744, CVE-2013-2407: Rework part of fix for JDK-6741606. ++ * S8008982: Adjust JMX for underlying interface changes. ++ * S8009004: Better implementation of RMI connections. ++ * S8009008: Better manage management-api. ++ * S8009013: Better handling of T2K glyphs. ++ * S8009034: Improve resulting notifications in JMX. ++ * S8009038: Improve JMX notification support. ++ * S8009057, CVE-2013-2448: Improve MIDI event handling. ++ * S8009067: Improve storing keys in KeyStore. ++ * S8009071, CVE-2013-2459: Improve shape handling. ++ * S8009235: Improve handling of TSA data. ++ * S8009424, CVE-2013-2458: Adapt Nashorn to JSR-292 implementation change. ++ * S8009554, CVE-2013-2454: Improve SerialJavaObject.getFields. ++ * S8009654: Improve stability of cmsnamed. ++ * S8010209, CVE-2013-2460: Better provision of factories. ++ * S8011243, CVE-2013-2470: Improve ImagingLib. ++ * S8011248, CVE-2013-2471: Better Component Rasters. ++ * S8011253, CVE-2013-2472: Better Short Component Rasters. ++ * S8011257, CVE-2013-2473: Better Byte Component Rasters. ++ * S8012375, CVE-2013-1571: Improve Javadoc framing. ++ * S8012421: Better positioning of PairPositioning. ++ * S8012438, CVE-2013-2463: Better image validation. ++ * S8012597, CVE-2013-2465: Better image channel verification. ++ * S8012601, CVE-2013-2469: Better validation of image layouts. ++ * S8014281, CVE-2013-2461: Better checking of XML signature. ++ * S8015997: Additional improvement in Javadoc framing. ++ * Breaks icedtea-netx (<< 1.4-2). ++ ++ -- Matthias Klose Fri, 28 Jun 2013 16:55:32 +0200 ++ ++openjdk-7 (7u21-2.3.9-5) unstable; urgency=low ++ ++ * Update kFreeBSD support (Guido Guenther). Closes: #708818. ++ * Stop building the transitional cacao package for sid. ++ ++ -- Matthias Klose Sat, 18 May 2013 20:13:33 +0200 ++ ++openjdk-7 (7u21-2.3.9-4) unstable; urgency=high ++ ++ * Build the transitional cacao package for sid as well. 
Apparently
++ some buildds are not updated to list wheezy as the code name for
++ the current distribution.
++
++ -- Matthias Klose Thu, 02 May 2013 03:27:44 +0200
++
++openjdk-7 (7u21-2.3.9-3) unstable; urgency=high
++
++ * Disable the cacao build again; it was causing build failures on i386
++ and s390.
++ * Build a transitional cacao jre package instead.
++
++ -- Matthias Klose Tue, 30 Apr 2013 00:27:05 +0200
++
++openjdk-7 (7u21-2.3.9-2) unstable; urgency=high
++
++ * On ia64, use gcj-4.7 for the bootstrap build.
++ * Drop the cacao jre from recommends to suggests.
++ * Re-enable cacao, which was enabled in the 2.1.x series.
++
++ -- Matthias Klose Sat, 27 Apr 2013 01:17:16 +0200
++
++openjdk-7 (7u21-2.3.9-1) unstable; urgency=high
++
++ * IcedTea7 2.3.9 release.
++ * Security fixes:
++ - S6657673, CVE-2013-1518: Issues with JAXP.
++ - S7200507: Refactor Introspector internals.
++ - S8000724, CVE-2013-2417: Improve networking serialization.
++ - S8001031, CVE-2013-2419: Better font processing.
++ - S8001040, CVE-2013-1537: Rework RMI model.
++ - S8001322: Refactor deserialization.
++ - S8001329, CVE-2013-1557: Augment RMI logging.
++ - S8003335: Better handling of Finalizer thread.
++ - S8003445: Adjust JAX-WS to focus on API.
++ - S8003543, CVE-2013-2415: Improve processing of MTOM attachments.
++ - S8004261: Improve input validation.
++ - S8004336, CVE-2013-2431: Better handling of method handle intrinsic frames.
++ - S8004986, CVE-2013-2383: Better handling of glyph table.
++ - S8004987, CVE-2013-2384: Improve font layout.
++ - S8004994, CVE-2013-1569: Improve checking of glyph table.
++ - S8005432: Update access to JAX-WS.
++ - S8005943: (process) Improved Runtime.exec.
++ - S8006309: More reliable control panel operation.
++ - S8006435, CVE-2013-2424: Improvements in JMX.
++ - S8006790: Improve checking for windows.
++ - S8006795: Improve font warning messages.
++ - S8007406: Improve accessibility of AccessBridge.
++ - S8007617, CVE-2013-2420: Better validation of images.
++ - S8007667, CVE-2013-2430: Better image reading.
++ - S8007918, CVE-2013-2429: Better image writing.
++ - S8008140: Better method handle resolution.
++ - S8009049, CVE-2013-2436: Better method handle binding.
++ - S8009063, CVE-2013-2426: Improve reliability of ConcurrentHashMap.
++ - S8009305, CVE-2013-0401: Improve AWT data transfer.
++ - S8009677, CVE-2013-2423: Better setting of setters.
++ - S8009699, CVE-2013-2421: Methodhandle lookup.
++ - S8009814, CVE-2013-1488: Better driver management.
++ - S8009857, CVE-2013-2422: Problem with plugin.
++ * Backports:
++ - S7130662: GTK file dialog crashes with a NPE.
++ * Bug fixes:
++ - PR1363: Fedora 19 / rawhide FTBFS SIGILL.
++ - PR1401: Fix Zero build on 2.3.8.
++ - Fix offset problem in ICU LETableReference.
++ - Change -Werror fix to preserve OpenJDK default.
++ - PR1303: Correct #ifdef to #if.
++ - PR1404: Failure to bootstrap with ecj 4.2.
++
++ -- Matthias Klose Mon, 22 Apr 2013 03:27:08 +0200
++
++openjdk-7 (7u17-2.3.8-2) experimental; urgency=low
++
++ * Remove Torsten Werner as uploader.
++
++ -- Matthias Klose Mon, 01 Apr 2013 00:39:58 +0200
++
++openjdk-7 (7u17-2.3.8-1ubuntu1) raring; urgency=low
++
++ * Regenerate the control file.
++
++ -- Matthias Klose Sun, 31 Mar 2013 20:10:05 +0200
++
++openjdk-7 (7u17-2.3.8-1) experimental; urgency=low
++
++ * IcedTea7 2.3.8 release.
++ * Security fixes:
++ - S8007014, CVE-2013-0809: Improve image handling.
++ - S8007675, CVE-2013-1493: Improve color conversion.
++ * Backports: ++ - S8002344: Krb5LoginModule config class does not return proper KDC list ++ from DNS. ++ - S8004344: Fix a crash in ToolkitErrorHandler() in XlibWrapper.c. ++ - S8006179: JSR292 MethodHandles lookup with interface using findVirtual(). ++ - S8006882: Proxy generated classes in sun.proxy package breaks JMockit. ++ * Bug fixes: ++ - PR1303: Correct #ifdef to #if. ++ - PR1340: Simplify the rhino class rewriter to avoid use of concurrency. ++ - Revert 7017193 and add the missing free call, until a better fix is ready. ++ ++ -- Matthias Klose Sun, 31 Mar 2013 14:31:11 +0200 ++ ++openjdk-7 (7u15-2.3.7-1ubuntu2) raring; urgency=low ++ ++ * Security fixes: ++ - S8007014, CVE-2013-0809: Improve image handling ++ - S8007675, CVE-2013-1493: Improve color conversion ++ - debian/rules: updated to add 8007014.patch and 8007675.patch ++ ++ -- Jamie Strandboge Wed, 06 Mar 2013 14:12:03 -0600 ++ ++openjdk-7 (7u15-2.3.7-1ubuntu1) raring; urgency=low ++ ++ * Regenerate the control file. ++ ++ -- Matthias Klose Wed, 20 Feb 2013 23:59:54 +0100 ++ ++openjdk-7 (7u15-2.3.7-1) experimental; urgency=low ++ ++ * IcedTea7 2.3.7 release. ++ * Security fixes: ++ - S8004937, CVE-2013-1484: Improve proxy construction. ++ - S8006439, CVE-2013-1485: Improve MethodHandles coverage. ++ - S8006446, CVE-2013-1486: Restrict MBeanServer access. ++ - S8006777, CVE-2013-0169: Improve TLS handling of invalid messages. ++ - S8007688: Blacklist known bad certificate. ++ * Backports: ++ - S8007393: Possible race condition after JDK-6664509. ++ - S8007611: logging behavior in applet changed. ++ * For zero builds, use the same hotspot version as in 2.1.6. ++ * Reenable bootstrap builds, except for alpha. ++ * Explicitly disable building on mips/mipsel. Not supported by the ++ Debian OpenJDK maintainers, the Debian mips porters, or the Debian ++ Java team. ++ ++ -- Matthias Klose Wed, 20 Feb 2013 23:33:58 +0100 ++ ++openjdk-7 (7u13-2.3.6-1) experimental; urgency=low ++ ++ * IcedTea7 2.3.6 release. ++ - Disable bootstrap builds, currently broken in IcedTea. ++ * Security fixes: ++ - S6563318, CVE-2013-0424: RMI data sanitization. ++ - S6664509, CVE-2013-0425: Add logging context. ++ - S6664528, CVE-2013-0426: Find log level matching its name or value given ++ at construction time. ++ - S6776941: CVE-2013-0427: Improve thread pool shutdown. ++ - S7141694, CVE-2013-0429: Improving CORBA internals. ++ - S7173145: Improve in-memory representation of splashscreens. ++ - S7186945: Unpack200 improvement. ++ - S7186946: Refine unpacker resource usage. ++ - S7186948: Improve Swing data validation. ++ - S7186952, CVE-2013-0432: Improve clipboard access. ++ - S7186954: Improve connection performance. ++ - S7186957: Improve Pack200 data validation. ++ - S7192392, CVE-2013-0443: Better validation of client keys. ++ - S7192393, CVE-2013-0440: Better Checking of order of TLS Messages. ++ - S7192977, CVE-2013-0442: Issue in toolkit thread. ++ - S7197546, CVE-2013-0428: (proxy) Reflect about creating reflective proxies. ++ - S7200491: Tighten up JTable layout code. ++ - S7200500: Launcher better input validation. ++ - S7201064: Better dialogue checking. ++ - S7201066, CVE-2013-0441: Change modifiers on unused fields. ++ - S7201068, CVE-2013-0435: Better handling of UI elements. ++ - S7201070: Serialization to conform to protocol. ++ - S7201071, CVE-2013-0433: InetSocketAddress serialization issue. ++ - S8000210: Improve JarFile code quality. ++ - S8000537, CVE-2013-0450: Contextualize RequiredModelMBean class. 
++ - S8000540, CVE-2013-1475: Improve IIOP type reuse management. ++ - S8000631, CVE-2013-1476: Restrict access to class constructor. ++ - S8001235, CVE-2013-0434: Improve JAXP HTTP handling. ++ - S8001242: Improve RMI HTTP conformance. ++ - S8001307: Modify ACC_SUPER behavior. ++ - S8001972, CVE-2013-1478: Improve image processing. ++ - S8002325, CVE-2013-1480: Improve management of images. ++ * Fix font suggestion for indic fonts in wheezy. ++ * Fix fontconfig definitions for japanese and korean fonts, fixing ++ compilation of the fontconfig file. ++ * Add Built-Using: rhino attribute for the -lib package. ++ * Don't use concurrent features to rewrite the rhino jar file. ++ * Enable class data sharing for the hotspot server VM. ++ ++ -- Matthias Klose Tue, 12 Feb 2013 20:59:48 +0100 ++ ++openjdk-7 (7u9-2.3.4-1) experimental; urgency=low ++ ++ * IcedTea7 2.3.4 release. ++ * Security fixes ++ - S8004933, CVE-2012-3174: Improve MethodHandle interaction with libraries. ++ - S8006017, CVE-2013-0422: Improve lookup resolutions. ++ - S8006125: Update MethodHandles library interactions. ++ * Bug fixes ++ - S7197906: BlockOffsetArray::power_to_cards_back() needs to handle > 32 bit ++ shifts. ++ - G422525: Fix building with PaX enabled kernels. ++ ++ [ Matthias Klose ] ++ * Loosen OpenGL dependency. Closes: #695028. ++ * Fix error parsing drop files parameter from pcmanfm (Alberto Fernández ++ Martínez). Closes: #695992. ++ ++ [ Thorsten Glaser ] ++ * debian/rules: Use gcj-4.6-jdk for m68k builds. ++ * d/patches/text-relocations.patch: build with -fPIC on all archs. ++ ++ -- Matthias Klose Tue, 15 Jan 2013 23:38:48 +0100 ++ ++openjdk-7 (7u9-2.3.3-1) experimental; urgency=low ++ ++ * Upload to experimental. ++ ++ -- Matthias Klose Wed, 17 Oct 2012 15:16:51 +0200 ++ ++openjdk-7 (7u9-2.3.3-0ubuntu1) quantal-security; urgency=low ++ ++ * IcedTea7 2.3.3 release. ++ * Security fixes ++ - S6631398, CVE-2012-3216: FilePermission improved path checking. ++ - S7093490: adjust package access in rmiregistry. ++ - S7143535, CVE-2012-5068: ScriptEngine corrected permissions. ++ - S7158796, CVE-2012-5070: Tighten properties checking in EnvHelp. ++ - S7158807: Revise stack management with volatile call sites. ++ - S7163198, CVE-2012-5076: Tightened package accessibility. ++ - S7167656, CVE-2012-5077: Multiple Seeders are being created. ++ - S7169884, CVE-2012-5073: LogManager checks do not work correctly for ++ sub-types. ++ - S7169887, CVE-2012-5074: Tightened package accessibility. ++ - S7169888, CVE-2012-5075: Narrowing resource definitions in JMX RMI ++ connector. ++ - S7172522, CVE-2012-5072: Improve DomainCombiner checking. ++ - S7186286, CVE-2012-5081: TLS implementation to better adhere to RFC. ++ - S7189103, CVE-2012-5069: Executors needs to maintain state. ++ - S7189490: More improvements to DomainCombiner checking. ++ - S7189567, CVE-2012-5085: java net obselete protocol. ++ - S7192975, CVE-2012-5071: Issue with JMX reflection. ++ - S7195194, CVE-2012-5084: Better data validation for Swing. ++ - S7195549, CVE-2012-5087: Better bean object persistence. ++ - S7195917, CVE-2012-5086: XMLDecoder parsing at close-time should be ++ improved. ++ - S7195919, CVE-2012-5979: (sl) ServiceLoader can throw CCE without ++ needing to create instance. ++ - S7196190, CVE-2012-5088: Improve method of handling MethodHandles. ++ - S7198296, CVE-2012-5089: Refactor classloader usage. ++ - S7158800: Improve storage of symbol tables. ++ - S7158801: Improve VM CompileOnly option. ++ - S7158804: Improve config file parsing. 
++ - S7198606, CVE-2012-4416: Improve VM optimization. ++ ++ -- Matthias Klose Wed, 17 Oct 2012 13:27:47 +0200 ++ ++openjdk-7 (7u7-2.3.2a-1ubuntu1) quantal; urgency=low ++ ++ * Build a transitional icedtea-7-jre-cacao package to ease upgrades. ++ ++ -- Matthias Klose Wed, 19 Sep 2012 17:42:39 +0200 ++ ++openjdk-7 (7u7-2.3.2a-1) experimental; urgency=low ++ ++ * Upload to experimental. ++ ++ -- Matthias Klose Sat, 15 Sep 2012 22:20:06 +0200 ++ ++openjdk-7 (7u7-2.3.2a-0ubuntu1) quantal; urgency=low ++ ++ * Repackage the source to drop the cacao tarball (and packaging files). ++ * Depend again on system provided tzdata-java and restore the zi ++ symlink on upgrade. LP: #1050404. ++ * libgnome2-0, libgnomevfs2-0, libgconf2-4 are not prepared for multiarch. ++ Don't depend on these so that openjdk-7 can be installed as a multiarch ++ package. ++ ++ -- Matthias Klose Sat, 15 Sep 2012 17:01:12 +0200 ++ ++openjdk-7 (7u7-2.3.2-1ubuntu2) quantal; urgency=low ++ ++ * Make the avian VM a known runtime. ++ ++ -- Matthias Klose Wed, 05 Sep 2012 11:58:35 +0200 ++ ++openjdk-7 (7u7-2.3.2-1ubuntu1) quantal; urgency=low ++ ++ * Fix 32bit hotspot build, don't set maximal heap space lower than ++ minimal heap space for the docs build. ++ * d/p/sane-library-paths.patch, d/p/ant-diagnostics.diff, ++ d/p/fix-race-cond-print.diff, d/p/gcc-hotspot-opt-O[02].diff, ++ d/p/gcc-mtune-generic.diff, d/p/openjdk-6986968.diff: Remove, not used. ++ * Remove unused shark/llvm-3.0 patches. ++ * d/p/zero-only-use-floating-point-if-floating-poi.patch: Remove, applied ++ upstream. ++ * Don't explicitly build with -march=i586 on i386 architectures. ++ * Re-apply zero-missing-headers.diff. ++ * Disable cacao builds, needs update for 7u7. ++ * For Ubuntu quantal, set priorities for alternatives higher than for ++ OpenJDK 6. ++ * Call update-alternatives when the existing priority for the alternative ++ is lower than the current one. ++ * Configure with --disable-downloading. ++ * Pass -avoid-version to libtool to create a JamVM libjvm.so without SONAME ++ version numbers to match the Hotspot Server/Client libjvm.so. LP: #850433. ++ * Revert the following change: Move libgnome2-0, libgnomevfs2-0, libgconf2-4 ++ from Depends of JRE package to Recommends (#661465). ++ The proper fix is to create a -jdk-headless package, or not depending on ++ these gnome packages at all (e.g. using XDG libraries). ++ ++ -- Matthias Klose Tue, 04 Sep 2012 12:08:31 +0200 ++ ++openjdk-7 (7u7-2.3.2-1) experimental; urgency=low ++ ++ * New upstream IcedTea7 2.3.2 release. ++ * Security fixes: ++ - CVE-2012-4681: Reintroduce PackageAccessible checks removed in 6788531. ++ - S7079902, CVE-2012-1711: Refine CORBA data models. ++ - S7143606, CVE-2012-1717: File.createTempFile should be improved ++ for temporary files created by the platform. ++ - S7143614, CVE-2012-1716: SynthLookAndFeel stability improvement. ++ - S7143617, CVE-2012-1713: Improve fontmanager layout lookup operations. ++ - S7143851, CVE-2012-1719: Improve IIOP stub and tie generation in RMIC. ++ - S7143872, CVE-2012-1718: Improve certificate extension processing. ++ - S7152811, CVE-2012-1723: Issues in client compiler. ++ - S7157609, CVE-2012-1724: Issues with loop. ++ - S7160757, CVE-2012-1725: Problem with hotspot/runtime_classfile. ++ - S7165628, CVE-2012-1726: Issues with java.lang.invoke.MethodHandles.Lookup. ++ * Bump version to 7u7 (OpenJDK), 2.3.2 (IcedTea). Closes: #685276. 
++ * d/p/icedtea7-forest-jdk_7104625-XEvent_wrap_logging_calls_with_if.patch, ++ d/p/hotspot-sparc.diff: Remove, integrated upstream. ++ * d/p/{deb-multiarch,fix_extra_flags,hotspot-no-werror}.diff: ++ Add variants for hotspot and zero builds. ++ * d/p/default-jvm-cfg.diff, d/p/icedtea-4953367.patch, ++ d/p/icedtea-patch.diff, d/p/icedtea-pretend-memory.diff, ++ d/p/libpcsclite-dlopen.diff, d/p/nonreparenting-wm.diff: ++ Update for 2.3.2. ++ * Remove build support for Ubuntu releases earlier than hardy. ++ * d/update-shasum.sh: Only update the shasums of the -dfsg tarballs. ++ * Don't apply shark patches (not built anyway). ++ ++ -- Matthias Klose Sat, 01 Sep 2012 11:46:50 +0200 ++ ++openjdk-7 (7u3-2.1.7-1) unstable; urgency=high ++ ++ * IcedTea7 2.1.7 release: ++ * Security fixes: ++ - S8007014, CVE-2013-0809: Improve image handling. ++ - S8007675, CVE-2013-1493: Improve color conversion. ++ * Backports: ++ - S8002344: Krb5LoginModule config class does not return proper KDC list ++ from DNS. ++ - S8004344: Fix a crash in ToolkitErrorHandler() in XlibWrapper.c. ++ - S8006179: JSR292 MethodHandles lookup with interface using findVirtual(). ++ - S8006882: Proxy generated classes in sun.proxy package breaks JMockit. ++ * Bug fixes: ++ - PR1303: Correct #ifdef to #if ++ - Stop libraries being stripped in the OpenJDK build. ++ - PR1340: Simplify the rhino class rewriter to avoid use of concurrency. ++ - Revert 7017193 and add the missing free call, until a better fix is ready. ++ ++ -- Matthias Klose Sat, 30 Mar 2013 11:31:12 +0100 ++ ++openjdk-7 (7u3-2.1.6-1) unstable; urgency=high ++ ++ * IcedTea7 2.1.5 release: ++ * Security fixes: ++ - S6563318, CVE-2013-0424: RMI data sanitization. ++ - S6664509, CVE-2013-0425: Add logging context. ++ - S6664528, CVE-2013-0426: Find log level matching its name or value ++ given at construction time. ++ - S6776941: CVE-2013-0427: Improve thread pool shutdown. ++ - S7141694, CVE-2013-0429: Improving CORBA internals. ++ - S7173145: Improve in-memory representation of splashscreens. ++ - S7186945: Unpack200 improvement. ++ - S7186946: Refine unpacker resource usage. ++ - S7186948: Improve Swing data validation. ++ - S7186952, CVE-2013-0432: Improve clipboard access. ++ - S7186954: Improve connection performance. ++ - S7186957: Improve Pack200 data validation. ++ - S7192392, CVE-2013-0443: Better validation of client keys. ++ - S7192393, CVE-2013-0440: Better Checking of order of TLS Messages. ++ - S7192977, CVE-2013-0442: Issue in toolkit thread. ++ - S7197546, CVE-2013-0428: (proxy) Reflect about creating reflective ++ proxies. ++ - S7200491: Tighten up JTable layout code. ++ - S7200493, CVE-2013-0444: Improve cache handling. ++ - S7200499: Better data validation for options. ++ - S7200500: Launcher better input validation. ++ - S7201064: Better dialogue checking. ++ - S7201066, CVE-2013-0441: Change modifiers on unused fields. ++ - S7201068, CVE-2013-0435: Better handling of UI elements. ++ - S7201070: Serialization to conform to protocol. ++ - S7201071, CVE-2013-0433: InetSocketAddress serialization issue. ++ - S8000210: Improve JarFile code quality. ++ - S8000537, CVE-2013-0450: Contextualize RequiredModelMBean class. ++ - S8000539, CVE-2013-0431: Introspect JMX data handling. ++ - S8000540, CVE-2013-1475: Improve IIOP type reuse management. ++ - S8000631, CVE-2013-1476: Restrict access to class constructor. ++ - S8001235, CVE-2013-0434: Improve JAXP HTTP handling. ++ - S8001242: Improve RMI HTTP conformance. ++ - S8001307: Modify ACC_SUPER behavior. 
++ - S8001972, CVE-2013-1478: Improve image processing. ++ - S8002325, CVE-2013-1480: Improve management of images. ++ * Backports: ++ - S7054590: (JSR-292) MethodHandleProxies.asInterfaceInstance() ++ accepts private/protected nested interfaces. ++ - S7175616: Port fix for TimeZone from JDK 8 to JDK 7. ++ - S8002068: Build broken: corba code changes unable to use new ++ JDK 7 classes. ++ - S8004341: Two JCK tests fails with 7u11 b06. ++ - S8005615: Java Logger fails to load tomcat logger implementation (JULI). ++ * IcedTea7 2.1.6 release: ++ * Security fixes: ++ - S8004937, CVE-2013-1484: Improve proxy construction. ++ - S8006439, CVE-2013-1485: Improve MethodHandles coverage. ++ - S8006446, CVE-2013-1486: Restrict MBeanServer access. ++ - S8006777, CVE-2013-0169: Improve TLS handling of invalid messages. ++ - S8007688: Blacklist known bad certificate. ++ * Backports: ++ - S7123519: problems with certification path. ++ - S8007393: Possible race condition after JDK-6664509. ++ - S8007611: logging behavior in applet changed. ++ * Fix font suggestion for indic fonts in wheezy. ++ * Fix fontconfig definitions for japanese and korean fonts, fixing ++ compilation of the fontconfig file. ++ * Add Built-Using: rhino attribute for the -lib package. ++ * Don't use concurrent features to rewrite the rhino jar file. ++ * Enable class data sharing for the hotspot server VM. ++ * Enable bootstrap builds for alpha. ++ * Explicitly disable building on mips/mipsel. Not supported by the ++ Debian OpenJDK maintainers, the Debian mips porters, or the Debian ++ Java team. ++ ++ -- Matthias Klose Thu, 21 Feb 2013 03:38:13 +0100 ++ ++openjdk-7 (7u3-2.1.4-1) unstable; urgency=low ++ ++ * IcedTea7 2.1.4 release. ++ * Security fixes ++ - S8004933, CVE-2012-3174: Improve MethodHandle interaction with libraries ++ - S8006017, CVE-2013-0422: Improve lookup resolutions ++ - S8006125: Update MethodHandles library interactions ++ * Loosen OpenGL dependency. Closes: #695028. ++ * Fix error parsing drop files parameter from pcmanfm (Alberto Fernández ++ Martínez). Closes: #695992. ++ ++ -- Matthias Klose Wed, 16 Jan 2013 11:46:20 +0100 ++ ++openjdk-7 (7u3-2.1.3-1) unstable; urgency=low ++ ++ * IcedTea7 2.1.3 release. ++ * Security fixes ++ - S6631398, CVE-2012-3216: FilePermission improved path checking. ++ - S7093490: adjust package access in rmiregistry. ++ - S7143535, CVE-2012-5068: ScriptEngine corrected permissions. ++ - S7158796, CVE-2012-5070: Tighten properties checking in EnvHelp. ++ - S7158807: Revise stack management with volatile call sites. ++ - S7163198, CVE-2012-5076: Tightened package accessibility. ++ - S7167656, CVE-2012-5077: Multiple Seeders are being created. ++ - S7169884, CVE-2012-5073: LogManager checks do not work correctly for ++ sub-types. ++ - S7169887, CVE-2012-5074: Tightened package accessibility. ++ - S7169888, CVE-2012-5075: Narrowing resource definitions in JMX RMI ++ connector. ++ - S7172522, CVE-2012-5072: Improve DomainCombiner checking. ++ - S7186286, CVE-2012-5081: TLS implementation to better adhere to RFC. ++ - S7189103, CVE-2012-5069: Executors needs to maintain state. ++ - S7189490: More improvements to DomainCombiner checking. ++ - S7189567, CVE-2012-5085: java net obselete protocol. ++ - S7192975, CVE-2012-5071: Issue with JMX reflection. ++ - S7195194, CVE-2012-5084: Better data validation for Swing. ++ - S7195549, CVE-2012-5087: Better bean object persistence. ++ - S7195917, CVE-2012-5086: XMLDecoder parsing at close-time should be ++ improved. 
++ - S7195919, CVE-2012-5979: (sl) ServiceLoader can throw CCE without
++ needing to create instance.
++ - S7196190, CVE-2012-5088: Improve method of handling MethodHandles.
++ - S7198296, CVE-2012-5089: Refactor classloader usage.
++ - S7158801: Improve VM CompileOnly option.
++ - S7158804: Improve config file parsing.
++ - S7198606, CVE-2012-4416: Improve VM optimization.
++ * Backports:
++ - S7175845: "jar uf" changes file permissions unexpectedly.
++ - S7177216: native2ascii changes file permissions of input file.
++ - S7106773: 512 bits RSA key cannot work with SHA384 and SHA512.
++ - S7158800: Improve storage of symbol tables.
++
++ -- Matthias Klose Wed, 17 Oct 2012 16:03:47 +0200
++
++openjdk-7 (7u3-2.1.2-2ubuntu1) precise-security; urgency=low
++
++ * Make the avian VM a known runtime.
++
++ -- Matthias Klose Sat, 08 Sep 2012 16:01:31 +0200
++
++openjdk-7 (7u3-2.1.2-2) unstable; urgency=high
++
++ * Pass -avoid-version to libtool to create a JamVM libjvm.so without SONAME
++ version numbers to match the Hotspot Server/Client libjvm.so. LP: #850433.
++
++ -- Matthias Klose Tue, 04 Sep 2012 17:55:43 +0200
++
++openjdk-7 (7u3-2.1.2-1) unstable; urgency=high
++
++ * IcedTea7 2.1.2 release.
++ * Security fixes:
++ - CVE-2012-4681, S7162473: Reintroduce PackageAccessible checks removed
++ in 6788531.
++ - S7162476, CVE-2012-1682: XMLDecoder security issue via ClassFinder.
++ - S7194567, CVE-2012-3136: Improve long term persistence of java.beans
++ objects.
++ - S7163201, CVE-2012-0547: Simplify toolkit internals references.
++ * d/p/hotspot-sparc.diff: Remove, integrated upstream.
++ * Stop running the mauve tests.
++
++ -- Matthias Klose Mon, 03 Sep 2012 23:17:11 +0200
++
++openjdk-7 (7~u3-2.1.1-3) unstable; urgency=low
++
++ * d/rules: Ensure we don't remove -O2 (default) when -O3 is disabled
++ (fixes the jamvm FTBFS on armhf without -O2).
++ * d/patches/gcc-jdk-opt-O0.diff, d/patches/gcc-jdk-opt-O2.diff,
++ d/patches/gcc-no-hardening.diff, d/patches/gcc-opt-O2.diff: removed.
++
++ -- Damien Raude-Morvan Wed, 25 Jul 2012 21:18:15 +0200
++
++openjdk-7 (7~u3-2.1.1-2) unstable; urgency=low
++
++ * d/rules: On Debian Wheezy/Sid bump Build-Depends on libnss3-dev
++ (>= 2:3.13.4) and Depends on libnss3 (>= 2:3.13.4) (ie. with epoch).
++ (Closes: #679465).
++ * d/control: Suggests icedtea-7-plugin instead of icedtea6-plugin
++ (Closes: #680284).
++ * d/patches/7130140-MouseEvent-systemout.diff: Remove "MEvent. CASE!" from
++ console output. (Closes: #679036).
++ * Disable compiling with -O3: it causes wrong Math.* computations.
++ (Closes: #679292, #678228). LP: #1044857.
++ * debian/patches/FreetypeFontScaler_getFontMetricsNative.diff:
++ Fix "OpenJDK returns the text height greater than font size".
++ (Closes: #657854).
++
++ -- Damien Raude-Morvan Sat, 30 Jun 2012 18:17:51 +0200
++
++openjdk-7 (7~u3-2.1.1-1) unstable; urgency=medium
++
++ * New upstream release with security fixes (Closes: #677486):
++ - S7079902, CVE-2012-1711: Refine CORBA data models
++ - S7110720: Issue with vm config file loading
++ - S7143606, CVE-2012-1717: File.createTempFile should be improved
++ for temporary files created by the platform.
++ - S7143614, CVE-2012-1716: SynthLookAndFeel stability improvement
++ - S7143617, CVE-2012-1713: Improve fontmanager layout lookup operations
++ - S7143851, CVE-2012-1719: Improve IIOP stub and tie generation in RMIC
++ - S7143872, CVE-2012-1718: Improve certificate extension processing
++ - S7145239: Finetune package definition restriction
++ - S7152811, CVE-2012-1723: Issues in client compiler
++ - S7157609, CVE-2012-1724: Issues with loop
++ - S7160677: missing else in fix for 7152811
++ - S7160757, CVE-2012-1725: Problem with hotspot/runtime_classfile
++ - S7165628, CVE-2012-1726: Issues with
++ java.lang.invoke.MethodHandles.Lookup
++ * Patches merged upstream:
++ - debian/patches/arm-thumb-fix.diff
++ - debian/patches/gcc-4.7.diff
++
++ [ James Page ]
++ * Cherry-picked a patch from openjdk-6 to fix handling of
++ ICC profiles (LP: #888123, #888129) (Closes: #676351).
++
++ [ Damien Raude-Morvan ]
++ * Move libgnome2-0, libgnomevfs2-0, libgconf2-4 from Depends of JRE package
++ to Recommends (Closes: #661465).
++ * New jni_md_h_JNIEXPORT_visibility.patch to allow JNIEXPORT definition
++ to work with -fvisibility=hidden. (Closes: #670896).
++
++ -- Damien Raude-Morvan Mon, 11 Jun 2012 21:01:10 +0200
++
++openjdk-7 (7~u3-2.1.1~pre1-2) unstable; urgency=low
++
++ * Don't mark the -demo package as Multi-Arch: same. Closes: #670038.
++ * Build using gcc-4.4 on mips, mipsel.
++ * Build again with older gcj version on s390 (4.6).
++
++ -- Matthias Klose Thu, 03 May 2012 10:44:20 +0200
++
++openjdk-7 (7~u3-2.1.1~pre1-1ubuntu3) precise-proposed; urgency=low
++
++ * Default to the ARM assembler interpreter instead of JamVM on
++ ARM. LP: #993380.
++
++ -- Matthias Klose Wed, 02 May 2012 17:58:20 +0200
++
++openjdk-7 (7~u3-2.1.1~pre1-1ubuntu2) precise; urgency=low
++
++ * Use the /usr/bin path for the policytool desktop file. LP: #980205.
++ Closes: #670037.
++
++ -- Matthias Klose Thu, 12 Apr 2012 23:29:06 +0200
++
++openjdk-7 (7~u3-2.1.1~pre1-1ubuntu1) precise; urgency=low
++
++ * Regenerate the control file.
++
++ -- Matthias Klose Wed, 11 Apr 2012 07:21:41 +0200
++
++openjdk-7 (7~u3-2.1.1~pre1-1) unstable; urgency=low
++
++ * Update from the IcedTea7-2.1 release branch (20110410).
++ * Install desktop files again, using the common /usr/bin/java
++ interpreter name.
++ * Build-depend on libpng-dev for newer releases. Closes: #662452.
++ * Let dlopen handle finding the libpcsclite library. LP: #898689.
++ * Build-depend on fonts-ipafont-mincho, fixing a build failure in the
++ fontconfig compiler (find out why it breaks ...).
++ * Build using gcc-4.7/gcj-4.7 for sid/wheezy, fixing a build failure.
++ * Remove the `-icedtea' suffix from the release identification.
++ * Fix the ARM Thumb build; update taken from IcedTea6.
++
++ -- Matthias Klose Tue, 10 Apr 2012 14:11:09 +0200
++
++openjdk-7 (7~u3-2.1-4) unstable; urgency=low
++
++ [ Matthias Klose ]
++ * Don't install the binary fontconfig file. LP: #964303.
++
++ [ Damien Raude-Morvan ]
++ * Remove the libxp-dev check in configure.ac, as it's no longer needed
++ (Closes: #657260), and drop the build dependency on libxp-dev.
++ * Fix FTBFS with glib 2.32 by adding an explicit dependency on
++ gthread-2.0.pc (Closes: #665666).
++ * Use libpng-dev instead of libpng12-dev for wheezy/sid (Closes: #662453).
++
++ -- Damien Raude-Morvan Mon, 09 Apr 2012 00:21:20 +0200
++
++openjdk-7 (7~u3-2.1-3) unstable; urgency=low
++
++ * d/rules,Makefile.am: Improve handling of dpkg-buildflags: don't overwrite
++ hotspot's CFLAGS, but pass EXTRA_* flags to the icedtea and openjdk
++ Makefiles.
++ (Closes: #661695).
++ * d/rules: Build everything with the -O3 opt level (jamvm, cacao and jdk).
++ * d/patches/kfreebsd-support-*.diff: Refresh kfreebsd patches and
++ fix FTBFS on k-i386 (ie. at least on a sid VM).
++ * Backport S7104625 as d/patches/icedtea7-forest-jdk_7104625*.patch
++ to check for logging to prevent wasted CPU (Closes: #651423).
++
++ -- Damien Raude-Morvan Tue, 06 Mar 2012 01:09:09 +0100
++
++openjdk-7 (7~u3-2.1-2) unstable; urgency=low
++
++ [ Matthias Klose ]
++ * Use NanumMyeongjo as the preferred korean font. LP: #792471.
++ * Fix crash in java.net.NetworkInterface.getNetworkInterfaces() when
++ ifr_ifindex exceeds 255. LP: #925218. S7078386.
++ * Use IPAfont as the preferred japanese font. Closes: #646054.
++ * Build using gcj on alpha and armel. Closes: #655750.
++
++ [ Damien Raude-Morvan ]
++ * d/patches/sparc-stubgenerator.diff: Fix FTBFS on sparc in
++ stubGenerator_sparc.cpp by using an explicit class typedef
++ (Closes: #660871).
++ * d/patches/fix_extra_flags.diff: Improve support for hardened builds;
++ also pass flags to the jdk build and pass -Wl,-z,relro during the
++ hotspot link.
++ * Bump Standards-Version to 3.9.3: no changes needed.
++ * d/control: Don't use the nonexistent dlopenjl:Recommends substvar,
++ replaced by dlopenhl:Recommends.
++ * d/*.{prerm,postrm}: Use set -e inside the scripts instead of an
++ sh -e shebang.
++ * Clean up lintian-overrides.
++
++ -- Damien Raude-Morvan Wed, 29 Feb 2012 00:52:49 +0100
++
++openjdk-7 (7~u3-2.1-1ubuntu2) precise; urgency=low
++
++ * Make sure that the nss.cfg doesn't mention any library path.
++ LP: #939361, #939419.
++ * Disable the accessibility wrapper; it doesn't work yet. LP: #935296.
++
++ -- Matthias Klose Fri, 24 Feb 2012 15:10:12 +0100
++
++openjdk-7 (7~u3-2.1-1ubuntu1) precise; urgency=low
++
++ [ Damien Raude-Morvan ]
++ * d/patches/jexec.diff: Dropped, unneeded and not compatible with
++ multi-arch.
++ * d/rules: Use dpkg-buildflags to enable hardened builds.
++ (Closes: #660021).
++
++ [ Matthias Klose ]
++ * Merge r522 from openjdk6:
++ - Make upgrades from non-multiarch to multiarch builds more silent.
++ - Fix order of grant decls in java.policy.
++ - Make doc files multi-arch installable.
++ - JB-archive.applications.in: Use /usr/bin/java by default. Maybe
++ should be moved to the default-jdk package.
++ * Explicitly look for the gthread-2.0 pkgconfig module.
++
++ -- Matthias Klose Wed, 22 Feb 2012 14:07:16 +0100
++
++openjdk-7 (7~u3-2.1-1) unstable; urgency=low
++
++ * Update icedtea7 2.1 (OpenJDK7 ~u3 release):
++ - Check for logging to prevent wasted CPU (Closes: #651423).
++ * Fix the following security issues:
++ - S7082299, CVE-2011-3571: Fix in AtomicReferenceArray
++ - S7088367, CVE-2011-3563: Fix issues in java sound
++ - S7110683, CVE-2012-0502: Issues with some KeyboardFocusManager method
++ - S7110687, CVE-2012-0503: Issues with TimeZone class
++ - S7110700, CVE-2012-0505: Enhance exception throwing mechanism in
++ ObjectStreamClass
++ - S7110704, CVE-2012-0506: Issues with some method in corba
++ - S7112642, CVE-2012-0497: Incorrect checking for graphics rendering object
++ - S7118283, CVE-2012-0501: Better input parameter checking in zip file
++ processing
++ - S7126960, CVE-2011-5035: Add property to limit number of request headers
++ to the HTTP Server
++
++ [ Matthias Klose ]
++ * openjdk-7-jre-lib: Mark as Multi-Arch: foreign.
++
++ [ Damien Raude-Morvan ]
++ * Merge r501-521 from openjdk6:
++ - Fix plugin name in jinfo file.
++ - Fix build flags for cppInterpreter_arm.o.
++ - Use java-atk-wrapper instead of java-access-bridge for accessibility. ++ - Make the java.policy file multi-arch installable. ++ - Don't install desktop and menu files for multiarch builds. ++ Needs a better solution. ++ - Don't install an alternative for the deprecated apt tool. ++ - Make the upgrade from a non-multiarch installation location more ++ robust; don't depend on version numbers, but check the path of the ++ alternatives. ++ - Disable test for armel and powerpc (broken on buildd) ++ * d/rules: Make symbolic links to src.zip on /usr/lib/jvm/java-7-openjdk-amd64 ++ like openjdk-6-jdk (Closes: #649618). ++ * d/rules: Pass -n to gzip when compressing manpages to be Multi-Arch: same safe. ++ * d/rules: Add build-arch/build-indep target. ++ * d/rules: Re-enable Cacao VM! ++ * d/{rules,control}: Only rhino 1.7R3 is supported by openjdk7, update B-D. ++ * d/patches/hotspot-s390.diff: Update for latest Hotspot. ++ * d/patches/icedtea-patch.diff: Move nssLibraryDirectory handling to d/rules. ++ * d/rules: Remove --with-*-drop-zip options, as code drops are embedded. ++ * d/patches/hsx23-zero.patch, patches/shark-compiler-fixes.patch: ++ Fix FTBFS for Zero under Hotspot >= v22. ++ * d/patches/kfreebsd-*: Refreshed. ++ * d/control: Make openjdk-7-source:all package binNMU-able by using ++ Depends ">=" on openjdk-7-jre (ie. src.zip won't change). ++ ++ -- Damien Raude-Morvan Wed, 15 Feb 2012 20:55:52 +0100 ++ ++openjdk-7 (7~b147-2.0-1) unstable; urgency=low ++ ++ * New upstream IcedTea7 release. ++ - S7000600, CVE-2011-3547: InputStream skip() information leak. ++ - S7019773, CVE-2011-3548: mutable static AWTKeyStroke.ctor. ++ - S7023640, CVE-2011-3551: Java2D TransformHelper integer overflow. ++ - S7032417, CVE-2011-3552: excessive default UDP socket limit under ++ SecurityManager. ++ - S7046794, CVE-2011-3553: JAX-WS stack-traces information leak. ++ - S7046823, CVE-2011-3544: missing SecurityManager checks in scripting ++ engine. ++ - S7055902, CVE-2011-3521: IIOP deserialization code execution. ++ - S7057857, CVE-2011-3554: insufficient pack200 JAR files uncompress error ++ checks. ++ - S7064341, CVE-2011-3389: HTTPS: block-wise chosen-plaintext attack ++ against SSL/TLS (BEAST). ++ - S7070134, CVE-2011-3558: HotSpot crashes with sigsegv from PorterStemmer. ++ - S7077466, CVE-2011-3556: RMI DGC server remote code execution. ++ - S7083012, CVE-2011-3557: RMI registry privileged code execution. ++ - S7096936, CVE-2011-3560: missing checkSetFactory calls in ++ HttpsURLConnection. ++ ++ [ Matthias Klose ] ++ * Merge debian packaging r501 from openjdk-6: ++ - Tighten inter-package dependencies for Debian builds. Closes: #641240. ++ * Build-depend on wdiff. ++ ++ -- Damien Raude-Morvan Sat, 01 Oct 2011 10:53:15 +0200 ++ ++openjdk-7 (7~b147-2.0~pre6-2) unstable; urgency=low ++ ++ * d/rules: Fix java.policy to include jre/lib/ext/* files (instead of ++ non-existant ext/*). It'll restore privilegied access from sunpkcs11.jar ++ to sun.* code. ++ * d/patches/s390_hotspot_fix.diff: Update to fix FTBFS on s390. ++ ++ -- Damien Raude-Morvan Sat, 01 Oct 2011 10:53:15 +0200 ++ ++openjdk-7 (7~b147-2.0~pre6-1) unstable; urgency=low ++ ++ * Update to IcedTea7 (20110928). ++ ++ [ Matthias Klose ] ++ * Merge debian packaging r496 from openjdk-6: ++ - Fix dangling java-1.7.0-openjdk symlink for non-multiarch builds. ++ ++ [ Damien Raude-Morvan ] ++ * d/rules: --disable-compile-against-syscalls for kFreeBSD (since there is ++ no epoll support). 
++ * Update patches:
++ - d/patches/sun-awt-buildsystem.diff: Drop, merged upstream.
++ - d/patches/icedtea-override-redirect-compiz.patch: Refresh.
++ - d/patches/s390_hotspot_fix.diff: Extracted (instead of a direct patch).
++ * Add Build-Depends on libattr1-dev.
++
++ -- Damien Raude-Morvan Wed, 28 Sep 2011 17:19:33 +0200
++
++openjdk-7 (7~b147-2.0~pre5-1) unstable; urgency=low
++
++ * Update to IcedTea7 (20110914).
++ - d/patches/jdk-no-mapfile.diff: Drop, merged in icedtea7-forest
++ (for real this time).
++ - d/patches/sun-awt-buildsystem.diff: Fix the icedtea7-forest awt build.
++ * d/patches/kfreebsd-support-jdk.diff: Refresh.
++ * d/patches/icedtea-patch.diff: Remove usage of nssLibraryDirectory because
++ while it works for nss3, it fails for softokn3 (since the latter is
++ in the nss/ subdirectory). Without this parameter, openjdk-7 will rely
++ on the default ld.so behavior. (Closes: #637337, #638008)
++ In openjdk-6, nssLibraryDirectory was not used to load softokn3 (Secmod).
++ * d/control: openjdk-7-jre Provides java7-runtime, openjdk-7-jre-headless
++ Provides java7-runtime-headless, and openjdk-7-jdk Provides java7-jdk.
++ (Closes: #641668).
++
++ -- Damien Raude-Morvan Mon, 19 Sep 2011 16:33:15 +0200
++
++openjdk-7 (7~b147-2.0~pre4-1) unstable; urgency=low
++
++ * Update to IcedTea7 (20110906):
++ - JamVM: support for armhf and various other fixes.
++ * Upload to unstable.
++ * Regenerate the control file for Debian unstable.
++ * Makefile.am: Force JAVA_HOME for the ant call to the --with-jdk-home
++ value (without this, it defaults to the existing JAVA_HOME environment
++ variable or the /usr/bin/java link).
++
++ -- Damien Raude-Morvan Mon, 05 Sep 2011 23:50:43 +0200
++
++openjdk-7 (7~b147-2.0~pre3-3ubuntu2) oneiric; urgency=low
++
++ * Merge debian packaging r491 from openjdk-6:
++ - Move the -lib files into a different location so that the java-7-openjdk
++ name can be used as a symlink.
++ - Symlink the jre/cmm directory, instead of the files inside. Closes: #639883.
++
++ -- Matthias Klose Thu, 01 Sep 2011 21:08:45 +0200
++
++openjdk-7 (7~b147-2.0~pre3-3ubuntu1) oneiric; urgency=low
++
++ * Regenerate the control file.
++
++ -- Matthias Klose Sun, 28 Aug 2011 23:42:18 +0200
++
++openjdk-7 (7~b147-2.0~pre3-3) experimental; urgency=low
++
++ * Merge debian packaging r485:489 from openjdk-6:
++ - Build using GCC-4.4 on sparc and sparc64.
++ - Enable testsuite runs on s390x.
++ * Merge debian packaging r490 from openjdk-6:
++ - Set the plugin name for the jinfo file. Closes: #638548.
++ - Disable the mauve testsuite on i386.
++ - Make the installation multiarch aware.
++
++ -- Matthias Klose Sun, 28 Aug 2011 20:42:54 +0200
++
++openjdk-7 (7~b147-2.0~pre3-2) experimental; urgency=low
++
++ * d/patches/jdk-no-mapfile.diff: Re-add; it was not merged into the
++ current (e46d527097f1) revision, but into a later one.
++
++ -- Damien Raude-Morvan Mon, 22 Aug 2011 00:11:33 +0200
++
++openjdk-7 (7~b147-2.0~pre3-1) experimental; urgency=low
++
++ * Update to IcedTea7 (20110821):
++ - JamVM updates.
++ - S7070134,S7044738,S7068051,S7073913: Fix random segfaults
++ and related invalid results from loop unroll optimization.
++ - d/patches/jdk-no-mapfile.diff: Drop, merged in icedtea7-forest.
++
++ [ Matthias Klose ]
++ * Build using GCC-4.4 on mips/mipsel. Closes: #628620.
++ * Merge debian packaging r482:485 from openjdk-6:
++ - Call dbus-launch --exit-with-session in the testsuite. Closes: #612394.
++ - Build for s390x using Zero.
++
++ [ Damien Raude-Morvan ]
++ * d/patches/kfreebsd-support-hotspot.diff: Add a workaround
++ to handle #637378.
++ * d/generate-dfsg-zip.sh: Update to also handle langtools.tar.gz.
++ Closes: #623693.
++
++ -- Damien Raude-Morvan Sun, 21 Aug 2011 20:08:50 +0200
++
++openjdk-7 (7~b147-2.0~pre2-3) experimental; urgency=low
++
++ * d/patches/kfreebsd-support-hotspot.diff: Fix access to CPU registers
++ under kfreebsd-amd64.
++
++ -- Damien Raude-Morvan Sun, 07 Aug 2011 12:22:47 +0200
++
++openjdk-7 (7~b147-2.0~pre2-2) experimental; urgency=low
++
++ * d/patches/kfreebsd-support-jamvm.diff: Add support for kfreebsd-amd64.
++ * d/patches/kfreebsd-support-hotspot.diff: Small fixes for Hotspot on
++ kfreebsd-i386.
++ * Split d/patches/hotspot-s390.diff and zero-missing-headers.diff.
++ * Re-add missing changes from the last upload:
++ - patches/use-idx_t.patch: Edit the upstream patch to avoid FTBFS on s390.
++ - Makefile.{am,im}: Force the bootclasspath (useful when building from
++ openjdk-6).
++
++ -- Damien Raude-Morvan Sat, 06 Aug 2011 23:50:58 +0200
++
++openjdk-7 (7~b147-2.0~pre2-1) experimental; urgency=low
++
++ * Update to the icedtea7-forest snapshot (20110804):
++ - d/patches/pr753.diff: drop, merged in icedtea7-forest.
++ - d/patches/pr757.diff: drop, merged in icedtea7-forest.
++ - d/patches/zero-jsr292-fixes.diff: drop, merged in icedtea7-forest.
++ - d/patches/no-compiler-path.diff: drop, now handled correctly by
++ icedtea7's configure and openjdk's Makefile (via the CC and CXX
++ environment variables).
++ - Updated JamVM to the 2011-08-01 revision.
++
++ [ Damien Raude-Morvan ]
++ * d/patches/zero-fpu-control-is-noop.diff: Remove ShouldNotCallThis from
++ os_linux_zero.cpp (fixes a crash on i386).
++ * d/rules: Enable support for the GNU/kFreeBSD architectures:
++ - d/patches/kfreebsd-support-*: Update with the latest fixes.
++ - d/patches/kfreebsd-sync-issues.diff: hack to force some waiting
++ until we fix the sync issues.
++ - d/rules: Enable shark for GNU/kFreeBSD.
++ * d/rules: Use DEB_HOST_ARCH_CPU for jvmarch/archdir. Thanks to
++ Jérémie Koenig for the patch.
++ * d/patches/jexec.diff: Update for openjdk-7.
++ * d/JB-jdk.overrides.in: Fix the override for the new Lintian 2.5.0 path
++ handling.
++ * d/icedtea-7-jre-jamvm.overrides: As for the other libjvm.so files, use
++ --strip-debug instead of --strip-unneeded.
++ * d/source.lintian-overrides: Drop, no longer used in openjdk-7.
++
++ [ Matthias Klose ]
++ * Merge debian packaging r472:482 from openjdk-6:
++ - openjdk-6-jre-headless: Depend on icedtea-6-jre-jamvm, if it's
++ the default VM.
++ - Use gcj-4.4 as the stage1 java VM on mips and mipsel.
++ - Make JamVM the default VM on Ubuntu oneiric/ARM.
++
++ -- Matthias Klose Thu, 04 Aug 2011 11:38:01 +0200
++
++openjdk-7 (7~b147-2.0~pre1-1) experimental; urgency=low
++
++ * New b147 code drop (OpenJDK7 RC1).
++
++ [ Matthias Klose ]
++ * Fix the build on sparc64.
++ * Recognize 32-bit user space on sparc.
++ * Build shark using llvm-2.9.
++
++ [ Damien Raude-Morvan ]
++ * d/patches/zero-jsr292-fixes.diff: Fixes on Zero/Shark for JSR 292 support
++ from Chris Phillips.
++ * d/generate-dfsg-zip.sh: Update for OpenJDK7 as a first step to get #623693
++ fixed.
++ * d/patches/kfreebsd-*: WIP patches for GNU/kFreeBSD support
++ (not yet enabled by default).
++
++ -- Matthias Klose Sun, 17 Jul 2011 16:08:51 +0200
++
++openjdk-7 (7~b143-2.0~pre1-2) experimental; urgency=low
++
++ * Upload to experimental.
++
++ -- Matthias Klose Tue, 12 Jul 2011 14:30:01 +0200
++
++openjdk-7 (7~b143-2.0~pre1-1ubuntu1) oneiric; urgency=low
++
++ * Fix zero builds on non-ix86 architectures.
++ * Fix the build on sparc.
++ * Build using jpeg8.
++
++ -- Matthias Klose Tue, 12 Jul 2011 14:25:24 +0200
++
++openjdk-7 (7~b143-2.0~pre1-1) experimental; urgency=low
++
++ [ Damien Raude-Morvan ]
++ * New b143 code drop.
++ * Drop d/patches/7031385.diff: Merged upstream.
++ * Drop d/patches/jamvm-oj7.patch: Merged upstream.
++ * Manpages are now ja_JP.UTF-8 instead of ja_JP.eucJP.
++
++ [ Matthias Klose ]
++ * Apply fixes for IcedTea issues #753 and #757.
++ * Update the s390 hotspot build fixes.
++ * Re-enable zero on i386.
++
++ -- Matthias Klose Sun, 10 Jul 2011 14:28:17 +0200
++
++openjdk-7 (7~b136-2.0~pre1-2) experimental; urgency=low
++
++ * Disable zero on i386.
++
++ -- Matthias Klose Sun, 29 May 2011 12:37:03 +0200
++
++openjdk-7 (7~b136-2.0~pre1-1ubuntu1) oneiric; urgency=low
++
++ * Upload to oneiric.
++
++ -- Matthias Klose Sun, 29 May 2011 07:59:01 +0200
++
++openjdk-7 (7~b136-2.0~pre1-1) experimental; urgency=low
++
++ [ Matthias Klose ]
++ * Fix non-bootstrap builds.
++ * Merge debian packaging r469:472 from openjdk-6.
++ * Run the jtreg tests using JamVM too.
++ * Don't run the jtreg tests with the NSS security provider enabled.
++ * Update JamVM to 20110528.
++ * Re-enable the zero build, keep the shark builds disabled.
++
++ [ Damien Raude-Morvan ]
++ * Only apply jamvm-oj7.patch when the jamvm build is enabled.
++
++ -- Matthias Klose Sat, 28 May 2011 17:17:23 +0200
++
++openjdk-7 (7~b136-1.14+debian1-1) experimental; urgency=low
++
++ * New upstream release: IcedTea 1.14.
++ - debian/patches/jamvm-oj7.patch: support the new
++ JVM_FindClassFromBootLoader interface function in JamVM.
++ - Makefile.am: Fix some missing dependencies between the
++ patch and extract targets.
++ * debian/patches/nonreparenting-wm.diff: Update.
++ * Replace the B-D on libxalan2-java by xsltproc for bootstrapping JVMTI.
++ * Don't use GCJ_SUFFIX=4.6 for sid/wheezy/oneiric, as the GCJ version
++ is not homogeneous across architectures.
++ * Enable JamVM support:
++ - d/control: Add a B-D on libtool.
++
++ -- Damien Raude-Morvan Thu, 26 May 2011 23:03:56 +0200
++
++openjdk-7 (7~b136-1.14~pre0-4) experimental; urgency=low
++
++ * Re-add the build dependency on fastjar.
++ * Fix the dependency on liblcms2-2.
++
++ -- Matthias Klose Sun, 08 May 2011 10:21:21 +0200
++
++openjdk-7 (7~b136-1.14~pre0-3) experimental; urgency=low
++
++ * Fix the liblcms dependency for the -jre-headless package.
++
++ -- Damien Raude-Morvan Sat, 07 May 2011 17:20:15 +0200
++
++openjdk-7 (7~b136-1.14~pre0-2ubuntu2) oneiric; urgency=low
++
++ * Re-add the build dependency on fastjar.
++
++ -- Matthias Klose Sun, 08 May 2011 02:51:47 +0200
++
++openjdk-7 (7~b136-1.14~pre0-2) experimental; urgency=low
++
++ * Fix a build failure on i386 with GCC 4.6.
++
++ -- Matthias Klose Fri, 06 May 2011 17:10:00 +0200
++
++openjdk-7 (7~b136-1.14~pre0-1) experimental; urgency=low
++
++ [ Damien Raude-Morvan ]
++ * New b136 code drop:
++ - d/rules: Use jaxp-1_4_5-dev1.zip as the jaxp-drop-zip.
++ - d/patches/icedtea-pretend-memory.diff: Refreshed.
++
++ [ Matthias Klose ]
++ * Fix the -jre-lib dependency on -jre. Closes: #624846.
++ * Add lcms configury.
++
++ -- Matthias Klose Thu, 05 May 2011 21:08:55 +0200
++
++openjdk-7 (7~b130-1.14~pre0-2) experimental; urgency=low
++
++ * Remove obsolete conflicts. Closes: #624090.
++ * Add copyright for the rewriter class. Addresses part of #623693.
++ * Lower the priorities for the alternatives below those of OpenJDK 6,
++ as long as OpenJDK 7 is not yet released.
++ * Don't build HotSpot with -Werror on architectures other than amd64
++ and i386.
++
++ -- Matthias Klose Wed, 27 Apr 2011 23:03:45 +0200
++
++openjdk-7 (7~b130-1.14~pre0-1) experimental; urgency=low
++
++ * New b130 code drop.
++ * Merge debian packaging r464:469 from openjdk-6.
++ * Do not bump the epoch; the package was never uploaded to any official
++ repository.
++
++ -- Matthias Klose Wed, 20 Apr 2011 21:46:32 +0200
++
++openjdk-7 (1:7~b129-1.14~pre0-1) experimental; urgency=low
++
++ * New b129 code drop.
++ * Bump the epoch to 1 and use ~ to indicate that this is not the final
++ openjdk-7 release.
++
++ -- Damien Raude-Morvan Mon, 14 Feb 2011 00:34:45 +0100
++
++openjdk-7 (7b128-1.14~pre0-1) UNRELEASED; urgency=low
++
++ * New b128 code drop.
++ * Exclude the "release" file from dh_install.
++
++ -- Damien Raude-Morvan Mon, 07 Feb 2011 23:45:13 +0100
++
++openjdk-7 (7b126-1.14~pre0-1) UNRELEASED; urgency=low
++
++ * Merge debian packaging r446:464 from openjdk-6,
++ but keep the following changes:
++ - Replace DISABLE_PRECOMPILED_HEADER=1 by USE_PRECOMPILED_HEADER=0
++ - Use "ant, ant-optional" for all distrel
++ - Drop the "with_hotspot" variable (only one hotspot supported by IcedTea7)
++ - Drop --enable-xrender (not supported by IcedTea7)
++ * New b125 code drop:
++ - d/rules: Use the new archives via --with-*-src-zip
++ * Refresh patches:
++ - d/patches/shebang.diff: Year updated
++ - d/patches/ld-symbolic-functions.diff and
++ d/patches/no-compiler-path.diff: Changed corba generic Makefiles.
++ - d/patches/default-jvm-cfg.diff and d/patches/set-exec-name.diff:
++ Upstream merged the "solaris" and "linux" java.c and java_md.c.
++ * Drop patches:
++ - d/patches/too-many-args-ftbfs.diff (merged upstream)
++ - d/patches/sparc.diff (merged upstream)
++ - d/patches/hotspot-include-fixes.diff (includeDB dropped upstream)
++
++ -- Damien Raude-Morvan Mon, 24 Jan 2011 00:18:38 +0100
++
++openjdk-7 (7b106~pre1-0lucid2) lucid; urgency=low
++
++ * Build for lucid.
++
++ -- Matthias Klose Thu, 09 Sep 2010 15:32:13 +0200
++
++openjdk-7 (7b106~pre1-0ubuntu2) maverick; urgency=low
++
++ * Build openjdk-7 snapshot (7b106).
++ * Symlink timezone data.
++ * Disable shark builds, currently broken in 7b106.
++
++ -- Matthias Klose Tue, 07 Sep 2010 04:56:48 +0200
++
++openjdk-7 (7b89-1.13-0ubuntu1) maverick; urgency=low
++
++ * Update to the IcedTea 1.13 release.
++ * openjdk-7-jre: Recommend ttf-dejavu-extra. LP: #569396.
++
++ -- Matthias Klose Fri, 30 Jul 2010 01:19:45 +0200
++
++openjdk-7 (7b89~pre1-0lucid3) lucid; urgency=low
++
++ * Include docs in the -doc package. LP: #600834.
++ * Update from the IcedTea6 trunk.
++ - Plugin and netx fixes.
++ - Don't link the plugin against the libxul libraries. Closes: #576361.
++ - More plugin cpu usage fixes. Closes: #584335, #587049.
++ - Plugin: fixes AppletContext.getApplets().
++ * Fix Vcs-Bzr location. Closes: #530883.
++ * Search for unversioned llvm-config tool.
++ * Don't set XFILESEARCHPATH and NLSPATH on startup. LP: #586641.
++ * Fix chinese font metrics and prefer using 'WenQuanYi Micro Hei' font.
++ LP: #472845.
++ * Strip libjvm.so with --strip-debug instead of --strip-unneeded.
++ LP: #574997.
++ * Fix inter-package dependencies.
++
++ -- Matthias Klose Thu, 15 Jul 2010 18:12:17 +0200
++
++openjdk-7 (7b89~pre1-0lucid1) lucid; urgency=low
++
++ [ Damien Raude-Morvan ]
++ * Merge debian packaging r403:430 from openjdk-6.
++ * Add myself to Uploaders.
++ * Build openjdk-7 snapshot (7b89).
++ * Use ant+ant-optional (IcedTea7 supports ant 1.8).
++ * Merge debian packaging r431:436 from openjdk-6.
++
++ [ Matthias Klose ]
++ * Merge debian packaging r430:445 from openjdk-6.
++ * Update debian patches to 7b89.
++ * Reenable the two-stage build.
++ * Reenable building cacao.
++ * Reenable building zero.
++
++ -- Matthias Klose Tue, 13 Jul 2010 10:32:11 +0200
++
++openjdk-7 (7b77-0ubuntu1~ppa1) lucid; urgency=low
++
++ * Build openjdk-7 snapshot (7b77).
++ * Merge debian packaging r391:403 from openjdk-6.
++
++ -- Matthias Klose Mon, 21 Dec 2009 16:58:34 +0100
++
++openjdk-7 (7b72-0ubuntu1~ppa1) karmic; urgency=low
++
++ * Build openjdk-7 snapshot (7b72).
++ * Merge debian packaging r371:391 from openjdk-6.
++ * Disable the zero build for now.
++
++ -- Matthias Klose Sat, 03 Oct 2009 16:35:27 +0200
++
++openjdk-7 (7b66-0ubuntu1~ppa1) karmic; urgency=low
++
++ * Build openjdk-7 snapshot (7b66).
++ * Merge debian packaging r362:371 from openjdk-6.
++
++ -- Matthias Klose Thu, 06 Aug 2009 12:40:00 +0200
++
++openjdk-7 (7b59-0ubuntu1) karmic; urgency=low
++
++ * Reenable the build of zero.
++ * Reapply fontconfig patch.
++ * Apply icedtea-cacao-no-mmap-first-page patch.
++
++ -- Matthias Klose Fri, 29 May 2009 10:19:26 +0200
++
++openjdk-7 (7b59-0ubuntu1~ppa1) karmic; urgency=low
++
++ * Build openjdk-7 snapshot (7b59).
++ * Merge debian packaging r205:362 from openjdk-6.
++
++ -- Matthias Klose Wed, 27 May 2009 12:09:16 +0200
++
++openjdk-7 (7b40-0ubuntu2) jaunty; urgency=low
++
++ * Add build dependency on libxrender-dev.
++ * Don't use fastjar on ia64, working around a build failure.
++ * Add configury for shark builds.
++
++ -- Matthias Klose Thu, 04 Dec 2008 16:26:15 +0100
++
++openjdk-7 (7b40-0ubuntu1) jaunty; urgency=low
++
++ * Build openjdk-7 snapshot (7b40).
++ * Update packaging for openjdk-7.
++
++ -- Matthias Klose Tue, 02 Dec 2008 14:27:03 +0100
++
++openjdk-6 (6b22-1.10.1-0ubuntu1) natty; urgency=low
++
++ * IcedTea6 1.10.1 release.
++
++ -- Matthias Klose Tue, 05 Apr 2011 12:20:36 +0200
++
++openjdk-6 (6b22-1.10.1~pre1-1) experimental; urgency=low
++
++ * Upload to experimental.
++
++ -- Matthias Klose Sat, 26 Mar 2011 10:43:49 +0100
++
++openjdk-6 (6b22-1.10.1~pre1-0ubuntu1) natty; urgency=low
++
++ * Update from the IcedTea6-1.10 release branch (20110325).
++ * Add multiarch directories to the default library path. LP: #737603.
++
++ -- Matthias Klose Fri, 25 Mar 2011 16:33:57 +0100
++
++openjdk-6 (6b22-1.10-3) experimental; urgency=low
++
++ * Fix JamVM build on mips/mipsel (Robert Lougher).
++ * Re-enable the JamVM build on mips/mipsel.
++
++ -- Matthias Klose Sun, 06 Mar 2011 15:01:54 +0100
++
++openjdk-6 (6b22-1.10-2) experimental; urgency=low
++
++ * Mention that IcedTea is copyright GPLv2 + "CLASSPATH" EXCEPTION.
++ Closes: #611269.
++ * Don't run the jdk checks for the alternate builds (hotspot and
++ langtools checks are still run).
++ * Disable the JamVM build on mips/mipsel.
++
++ -- Matthias Klose Sat, 05 Mar 2011 16:13:40 +0100
++
++openjdk-6 (6b22-1.10-1) experimental; urgency=low
++
++ * Upload to experimental.
++
++ -- Matthias Klose Fri, 04 Mar 2011 16:12:50 +0100
++
++openjdk-6 (6b22-1.10-0ubuntu2) natty; urgency=low
++
++ * Disable the jdk tests with the Shark, JamVM and Cacao VMs.
++
++ -- Matthias Klose Fri, 04 Mar 2011 15:32:50 +0100
++
++openjdk-6 (6b22-1.10-0ubuntu1) natty; urgency=low
++
++ * IcedTea6 1.10 release.
++
++ -- Matthias Klose Thu, 03 Mar 2011 09:32:19 +0100
++
++openjdk-6 (6b21~pre3-0ubuntu1) natty; urgency=low
++
++ * Update from the IcedTea6 trunk (20110224).
++ * icedtea-6-jre-jamvm: Build JamVM as an alternative VM, ++ start with `java -jamvm'. ++ ++ -- Matthias Klose Thu, 24 Feb 2011 02:45:56 +0100 ++ ++openjdk-6 (6b21~pre2-0ubuntu1) natty; urgency=low ++ ++ * Update from the IcedTea6 trunk (20110217). ++ * Update hotspot hs20 (not yet enabled). ++ * Add ppc64 packaging bits. ++ ++ -- Matthias Klose Fri, 18 Feb 2011 15:55:41 +0100 ++ ++openjdk-6 (6b21~pre1-1) experimental; urgency=low ++ ++ * Upload to experimental. ++ ++ -- Matthias Klose Sat, 22 Jan 2011 15:34:21 +0100 ++ ++openjdk-6 (6b21~pre1-0ubuntu1) natty; urgency=low ++ ++ * Update to 6b21. ++ ++ -- Matthias Klose Sat, 22 Jan 2011 14:28:28 +0100 ++ ++openjdk-6 (6b20-1.10~pre3-0ubuntu4) natty; urgency=low ++ ++ * Fix shark build on powerpc. ++ ++ -- Matthias Klose Fri, 24 Dec 2010 11:06:32 +0100 ++ ++openjdk-6 (6b20-1.10~pre3-0ubuntu3) natty; urgency=low ++ ++ * Update from the IcedTea6 trunk (20101223). ++ ++ -- Matthias Klose Thu, 23 Dec 2010 14:27:17 +0100 ++ ++openjdk-6 (6b20-1.10~pre3-0ubuntu1) natty; urgency=low ++ ++ * Update from the IcedTea6 trunk (20101126). ++ * Update hotspot hs19. ++ * Fix build failures on ia64, s390 and sparc64. ++ ++ -- Matthias Klose Fri, 26 Nov 2010 16:38:16 +0100 ++ ++openjdk-6 (6b20-1.10~pre2-0ubuntu7~ppa1) natty; urgency=low ++ ++ * Reenable shark on amd64, but build using llvm-2.7. ++ ++ -- Matthias Klose Thu, 21 Oct 2010 23:59:30 +0200 ++ ++openjdk-6 (6b20-1.10~pre2-0ubuntu5) natty; urgency=low ++ ++ * Don't try to set up an alternative for javaws ++ ++ -- Matthias Klose Thu, 21 Oct 2010 17:38:48 +0200 ++ ++openjdk-6 (6b20-1.10~pre2-0ubuntu3) natty; urgency=low ++ ++ * Stop building zero/shark on amd64. Fails the self tests. ++ ++ -- Matthias Klose Thu, 21 Oct 2010 13:49:56 +0200 ++ ++openjdk-6 (6b20-1.10~pre2-0ubuntu2) natty; urgency=low ++ ++ * Don't include OpenJDK's javaws either. ++ * openjdk-6-jre: Recommend icedtea-netx. ++ ++ -- Matthias Klose Thu, 21 Oct 2010 08:34:07 +0200 ++ ++openjdk-6 (6b20-1.10~pre2-0ubuntu1) natty; urgency=low ++ ++ * Update from the IcedTea6 trunk (20101020). ++ * Remove the plugin and javaws from the packaging, removed upstream. ++ ++ -- Matthias Klose Wed, 20 Oct 2010 17:45:09 +0200 ++ ++openjdk-6 (6b20-1.10~pre1-0ubuntu2) natty; urgency=low ++ ++ * Build with hotspot 19. ++ ++ -- Matthias Klose Thu, 14 Oct 2010 11:28:38 +0200 ++ ++openjdk-6 (6b20-1.10~pre1-0ubuntu1) natty; urgency=low ++ ++ * Snapshot, taken from the IcedTea6 trunk (20101013). ++ ++ -- Matthias Klose Wed, 13 Oct 2010 08:36:44 +0200 ++ ++openjdk-6 (6b20-1.9.2-1) experimental; urgency=low ++ ++ * Upload to experimental. ++ ++ -- Matthias Klose Wed, 24 Nov 2010 05:29:43 +0100 ++ ++openjdk-6 (6b20-1.9.2-0ubuntu1) maverick-security; urgency=low ++ ++ * IcedTea6 1.9.2 release. ++ - CVE-2010-3860: Fix IcedTea System property information leak via ++ public static. ++ * Build using Hotspot hs19. ++ * Start metacity using dbus-launch, when running the testsuite. LP: #632594. ++ ++ -- Matthias Klose Sun, 21 Nov 2010 18:30:39 +0100 ++ ++openjdk-6 (6b20-1.9.1-1ubuntu3) maverick-security; urgency=low ++ ++ * Move all japanese man pages belonging to the jre into the -jre package. ++ Closes: #600765. ++ * Add -jdk replaces for -jre and -jre-headless. Closes: #600809. ++ ++ -- Matthias Klose Wed, 20 Oct 2010 12:51:34 +0200 ++ ++openjdk-6 (6b20-1.9.1-1ubuntu1) maverick-security; urgency=low ++ ++ * Fix upgrade to symlinked timezone data. Closes: #600359. ++ * Move all japanese man pages belonging to the jre into the -jre package. ++ Closes: #600765. 
++
++ -- Matthias Klose Mon, 18 Oct 2010 16:07:48 +0200
++
++openjdk-6 (6b20-1.9.1-1) experimental; urgency=low
++
++ * Upload to experimental.
++
++ -- Matthias Klose Tue, 12 Oct 2010 15:07:35 +0200
++
++openjdk-6 (6b20-1.9.1-0ubuntu1) maverick-security; urgency=low
++
++ * IcedTea6 1.9.1 release.
++ - Security updates:
++ - S6914943, CVE-2009-3555: TLS: MITM attacks via session renegotiation.
++ - S6559775, CVE-2010-3568: OpenJDK Deserialization Race condition.
++ - S6891766, CVE-2010-3554: OpenJDK corba reflection vulnerabilities.
++ - S6925710, CVE-2010-3562: OpenJDK IndexColorModel double-free.
++ - S6938813, CVE-2010-3557: OpenJDK Swing mutable static.
++ - S6957564, CVE-2010-3548: OpenJDK DNS server IP address information
++ leak.
++ - S6958060, CVE-2010-3564: OpenJDK kerberos vulnerability.
++ - S6963023, CVE-2010-3565: OpenJDK JPEG writeImage remote code execution.
++ - S6963489, CVE-2010-3566: OpenJDK ICC Profile remote code execution.
++ - S6966692, CVE-2010-3569: OpenJDK Serialization inconsistencies.
++ - S6622002, CVE-2010-3553: UIDefault.ProxyLazyValue has unsafe
++ reflection usage.
++ - S6623943: javax.swing.TimerQueue's thread occasionally fails to start.
++ - S6925672, CVE-2010-3561: Privileged ServerSocket.accept allows
++ receiving connections from any host.
++ - S6952017, CVE-2010-3549: HttpURLConnection chunked encoding issue
++ (Http request splitting).
++ - S6952603, CVE-2010-3551: NetworkInterface reveals local network
++ address to untrusted code.
++ - S6961084, CVE-2010-3541: limit setting of some request headers in
++ HttpURLConnection.
++ - S6963285, CVE-2010-3567: Crash in ICU Opentype layout engine due to
++ mismatch in character counts.
++ - S6980004, CVE-2010-3573: limit HTTP request cookie headers in
++ HttpURLConnection.
++ - S6981426, CVE-2010-3574: limit use of TRACE method in
++ HttpURLConnection.
++ - Plugin fixes.
++ - Backports from newer IcedTea releases.
++
++ -- Matthias Klose Tue, 12 Oct 2010 12:13:40 +0200
++
++openjdk-6 (6b20-1.9-1) experimental; urgency=low
++
++ * Upload to experimental.
++
++ -- Matthias Klose Tue, 07 Sep 2010 21:19:21 +0200
++
++openjdk-6 (6b20-1.9-0ubuntu1) maverick; urgency=low
++
++ * IcedTea6 1.9 release.
++
++ -- Matthias Klose Tue, 07 Sep 2010 18:13:20 +0200
++
++openjdk-6 (6b20~pre2-0ubuntu2) maverick; urgency=low
++
++ * Update from the IcedTea6 trunk.
++ * Really let the build fail on armel.
++
++ -- Matthias Klose Fri, 30 Jul 2010 16:55:38 +0200
++
++openjdk-6 (6b20~pre2-0ubuntu1) maverick; urgency=high
++
++ * Update from the IcedTea6 trunk.
++ - (CVE-2010-2783): IcedTea 'Extended JNLP Services' arbitrary file access.
++ - (CVE-2010-2548): IcedTea incomplete property access check for unsigned
++ applications.
++ * openjdk-6-jre: Recommend ttf-dejavu-extra. LP: #569396.
++ * Explicitly fail the build on armel. The ARM assembler interpreter is
++ disabled and would cause a 3-5x performance regression compared to the
++ current 6b18 armel binaries in the archive.
++
++ -- Matthias Klose Thu, 29 Jul 2010 00:10:53 +0200
++
++openjdk-6 (6b20~pre1-2) experimental; urgency=low
++
++ * Upload to experimental.
++
++ -- Matthias Klose Thu, 15 Jul 2010 13:55:02 +0200
++
++openjdk-6 (6b20~pre1-1ubuntu1) maverick; urgency=low
++
++ * Include docs in the -doc package. LP: #600834.
++ * Update from the IcedTea6 trunk.
++ - Plugin and netx fixes.
++ - Don't link the plugin against the libxul libraries. Closes: #576361.
++ - More plugin cpu usage fixes. Closes: #584335, #587049.
++ - Plugin: fixes AppletContext.getApplets().
++  * Fix Vcs-Bzr location. Closes: #530883.
++  * Search for unversioned llvm-config tool.
++  * Don't set XFILESEARCHPATH and NLSPATH on startup. LP: #586641.
++  * Fix Chinese font metrics and prefer using 'WenQuanYi Micro Hei' font.
++    LP: #472845.
++  * Strip libjvm.so with --strip-debug instead of --strip-unneeded.
++    LP: #574997.
++
++ -- Matthias Klose Thu, 15 Jul 2010 12:40:10 +0200
++
++openjdk-6 (6b20~pre1-1) experimental; urgency=low
++
++  * Upload to experimental.
++
++ -- Matthias Klose Mon, 28 Jun 2010 00:50:58 +0200
++
++openjdk-6 (6b20~pre1-0ubuntu2) maverick; urgency=low
++
++  * Shark & CACAO build fixes.
++
++ -- Matthias Klose Fri, 25 Jun 2010 02:27:10 +0200
++
++openjdk-6 (6b20~pre1-0ubuntu1) maverick; urgency=low
++
++  * Update to 6b20 code drop.
++
++ -- Matthias Klose Wed, 14 Apr 2010 02:53:37 +0200
++
++openjdk-6 (6b18-1.8-4) unstable; urgency=low
++
++  * Update from the 1.8 branch.
++    - Plugin and netx fixes.
++    - Don't link the plugin against the libxul libraries. Closes: #576361.
++    - More plugin cpu usage fixes. Closes: #584335, #587049.
++    - Plugin: fixes AppletContext.getApplets().
++    - Fix race conditions in plugin initialization code that were causing
++      hangs when loading multiple applets in parallel.
++  * Fix Vcs-Bzr location. Closes: #530883.
++  * Search for unversioned llvm-config tool.
++  * Don't set XFILESEARCHPATH and NLSPATH on startup. LP: #586641.
++  * Fix Chinese font metrics and prefer using 'WenQuanYi Micro Hei' font.
++    LP: #472845.
++  * Strip libjvm.so with --strip-debug instead of --strip-unneeded.
++    LP: #574997.
++  * Don't turn on the ARM assembler interpreter when building the shark
++    VM.
++
++ -- Matthias Klose Thu, 15 Jul 2010 00:40:13 +0200
++
++openjdk-6 (6b18-1.8-3) unstable; urgency=low
++
++  * Update from the 1.8 branch.
++    - Plugin fixes. LP: #597714.
++  * Add powerpcspe build fixes (Sebastian Andrzej Siewior). Closes: #586359.
++  * Work around build failure on buildds configured with low ARG_MAX
++    (Giovanni Mascellani). Closes: #575254.
++
++ -- Matthias Klose Sun, 27 Jun 2010 10:16:27 +0200
++
++openjdk-6 (6b18-1.8-2ubuntu3) maverick; urgency=low
++
++  * Update from the 1.8 branch.
++    - Plugin fixes. LP: #597714.
++  * Add powerpcspe build fixes (Sebastian Andrzej Siewior). Closes: #586359.
++  * Work around build failure on buildds configured with low ARG_MAX
++    (Giovanni Mascellani). Closes: #575254.
++
++ -- Matthias Klose Sun, 27 Jun 2010 10:16:27 +0200
++
++openjdk-6 (6b18-1.8-2ubuntu2) maverick; urgency=low
++
++  * Search for unversioned llvm-config tool.
++
++ -- Matthias Klose Sun, 02 May 2010 12:03:01 +0200
++
++openjdk-6 (6b18-1.8-2ubuntu1) maverick; urgency=low
++
++  * Upload to maverick.
++
++ -- Matthias Klose Sun, 02 May 2010 11:23:16 +0200
++
++openjdk-6 (6b18-1.8-2) unstable; urgency=low
++
++  * Update from the 1.8 branch.
++    - Fix build on Hitachi SH. Closes: #575346.
++    - Shark and Zero fixes.
++  * Build shark using llvm-2.7.
++  * Don't use shark to run the test harness when testing the shark build.
++  * README.Debian: Add paragraph about debugging the IcedTea NPPlugin.
++
++ -- Matthias Klose Sat, 01 May 2010 12:35:19 +0200
++
++openjdk-6 (6b18-1.8-1) unstable; urgency=low
++
++  * Upload to unstable.
++
++ -- Matthias Klose Wed, 14 Apr 2010 02:53:37 +0200
++
++openjdk-6 (6b18-1.8-0ubuntu1) lucid; urgency=low
++
++  * Update IcedTea6 to the icedtea6-1.8 release.
++  * Fix builds on Ubuntu/dapper and Debian/lenny.
++  * On hppa, configure --without-rhino --disable-plugin.
++  * Fix Hitachi SH configury. Closes: #575346.
++  * Start a window manager when running the tests. Prefer metacity,
++    as more tests pass with it.
++  * Let XToolkit.isTraySupported() return true, if Compiz is running.
++    Works around sun#6438179. LP: #300948.
++  * Make /jre/lib/security/nss.cfg a config file.
++  * Fail in the configuration of the packages, if /proc is not mounted.
++    java currently uses tricks to find its own shared libraries depending
++    on the path of the binary. Will be changed in OpenJDK7. Closes: #576453.
++  * Fix PR icedtea/469, testsuite failures with the NSS based security
++    provider. LP: #556549.
++  * Do not pass LD_LIBRARY_PATH from the plugin to the java process.
++    While libnss3.so gets loaded from /usr/lib, the dependent libraries
++    are loaded from MOZILLA_FIVE_HOME (See #561216 for the wrong firefox
++    config). LP: #561124.
++    Closes as well: LP: #551328, #554909, #560829, #549010, #553452.
++  * Always build shark with hs14.
++
++ -- Matthias Klose Wed, 14 Apr 2010 01:53:33 +0200
++
++openjdk-6 (6b18~pre4-1ubuntu1) lucid; urgency=low
++
++  * Build-depend on xulrunner-1.9.2-dev instead of xulrunner-dev,
++    unexpectedly demoted to universe.
++  * icedtea6-plugin: Hardcode dependency on xulrunner-1.9.2. No way
++    to do better? See #552780.
++  * Fix builds on Ubuntu hardy.
++
++ -- Matthias Klose Wed, 31 Mar 2010 22:21:49 +0200
++
++openjdk-6 (6b18~pre4-1) unstable; urgency=high
++
++  * Upload to unstable.
++
++ -- Matthias Klose Wed, 31 Mar 2010 16:35:18 +0200
++
++openjdk-6 (6b18~pre4-0ubuntu2) lucid; urgency=low
++
++  * Fix typo in NPPlugin code. LP: #552287.
++
++ -- Matthias Klose Wed, 31 Mar 2010 10:41:11 +0200
++
++openjdk-6 (6b18~pre4-0ubuntu1) lucid; urgency=low
++
++  [ Matthias Klose ]
++  * Update IcedTea6 from the 1.8 branch.
++  * SECURITY UPDATE: multiple upstream vulnerabilities. Upstream fixes:
++    - (CVE-2010-0837): JAR "unpack200" must verify input parameters (6902299).
++    - (CVE-2010-0845): No ClassCastException for HashAttributeSet constructors
++      if run with -Xcomp (6894807).
++    - (CVE-2010-0838): CMM readMabCurveData Buffer Overflow Vulnerability
++      (6899653).
++    - (CVE-2010-0082): Loader-constraint table allows arrays instead of
++      only the base-classes (6626217).
++    - (CVE-2010-0095): Subclasses of InetAddress may incorrectly interpret
++      network addresses (6893954) [ZDI-CAN-603].
++    - (CVE-2010-0085): File TOCTOU deserialization vulnerability (6736390).
++    - (CVE-2010-0091): Unsigned applet can retrieve the dragged information
++      before drop action occurs (6887703).
++    - (CVE-2010-0088): Inflater/Deflater clone issues (6745393).
++    - (CVE-2010-0084): Policy/PolicyFile leak dynamic ProtectionDomains
++      (6633872).
++    - (CVE-2010-0092): AtomicReferenceArray causes SIGSEGV -> SEGV_MAPERR
++      error (6888149).
++    - (CVE-2010-0094): Deserialization of RMIConnectionImpl objects should
++      enforce stricter checks (6893947) [ZDI-CAN-588].
++    - (CVE-2010-0093): System.arraycopy unable to reference elements
++      beyond Integer.MAX_VALUE bytes (6892265).
++    - (CVE-2010-0840): Applet Trusted Methods Chaining Privilege Escalation
++      Vulnerability (6904691).
++    - (CVE-2010-0848): AWT Library Invalid Index Vulnerability (6914823).
++    - (CVE-2010-0847): ImagingLib arbitrary code execution vulnerability
++      (6914866).
++    - (CVE-2009-3555): TLS: MITM attacks via session renegotiation.
++    - 6639665: ThreadGroup finalizer allows creation of false root
++      ThreadGroups.
++    - 6898622: ObjectIdentifier.equals is not capable of detecting incorrectly
++      encoded CommonName OIDs.
++    - 6910590: Application can modify command array in ProcessBuilder.
++    - 6909597: JPEGImageReader stepX Integer Overflow Vulnerability.
++    - 6932480: Crash in CompilerThread/Parser. Unloaded array klass?
++    - 6898739: TLS renegotiation issue.
++
++  [ Torsten Werner ]
++  * Switch off IPV6_V6ONLY for IN6_IS_ADDR_UNSPECIFIED addresses, too.
++    (Closes: #575163)
++
++ -- Matthias Klose Wed, 31 Mar 2010 02:34:04 +0200
++
++openjdk-6 (6b18~pre3-1) unstable; urgency=low
++
++  [ Matthias Klose ]
++  * Update IcedTea build infrastructure (20100321).
++  * Update support for SH4 (Nobuhiro Iwamatsu).
++  * Handle the renaming of the plugin.
++
++  [ Torsten Werner ]
++  * Improve patch for IPv4-mapped IPv6 addresses even more.
++    (Closes: #573742)
++
++ -- Matthias Klose Sun, 21 Mar 2010 22:52:12 +0100
++
++openjdk-6 (6b18~pre2-1ubuntu2) lucid; urgency=low
++
++  * Fix build failure on ARM.
++
++ -- Matthias Klose Fri, 12 Mar 2010 15:19:13 +0100
++
++openjdk-6 (6b18~pre2-1ubuntu1) lucid; urgency=low
++
++  * Upload to lucid.
++
++ -- Matthias Klose Wed, 10 Mar 2010 23:34:57 +0100
++
++openjdk-6 (6b18~pre2-1) unstable; urgency=low
++
++  * Update IcedTea build infrastructure (20100310).
++  * Disable building the plugin on alpha (borked xulrunner
++    packaging using binary indep packages).
++  * Use a two-stage build on alpha.
++  * Add note about the reparenting WM workaround. Closes: #573026.
++  * Prefer Sazanami instead of Kochi for Japanese fonts (Hideki Yamane).
++    Closes: #572511.
++  * openjdk-6-doc: Don't compress package-list files. Closes: #567899.
++
++ -- Matthias Klose Wed, 10 Mar 2010 11:19:19 +0100
++
++openjdk-6 (6b18~pre1-4) unstable; urgency=low
++
++  * Improve patch for IPv4-mapped IPv6 addresses.
++
++ -- Torsten Werner Sun, 07 Mar 2010 01:14:36 +0100
++
++openjdk-6 (6b18~pre1-3) unstable; urgency=low
++
++  * Add a patch for improved handling of IPv4-mapped IPv6 addresses.
++    (Closes: #560056, #561930, #563699, #563946)
++
++ -- Torsten Werner Tue, 02 Mar 2010 23:46:57 +0100
++
++openjdk-6 (6b18~pre1-2) unstable; urgency=low
++
++  * Change Build-Depends to ant1.7-optional because of a bus error in gij.
++
++ -- Torsten Werner Mon, 01 Mar 2010 07:17:16 +0100
++
++openjdk-6 (6b18~pre1-1ubuntu1) lucid; urgency=low
++
++  * Ignore error code running ant -diagnostics.
++  * Build-depend on ant-optional.
++  * Disable the cacao build on armel, fails to build with the non-bootstrap
++    build.
++
++ -- Matthias Klose Sat, 20 Feb 2010 15:36:06 +0100
++
++openjdk-6 (6b18~pre1-1) unstable; urgency=low
++
++  * Upload to unstable.
++
++ -- Matthias Klose Fri, 19 Feb 2010 21:52:32 +0100
++
++openjdk-6 (6b18~pre1-0ubuntu1) lucid; urgency=low
++
++  * New OpenJDK 6 b18 source code drop.
++  * Use mangled copy of rhino. Closes: #512970. LP: #255149.
++
++ -- Matthias Klose Fri, 19 Feb 2010 18:17:23 +0100
++
++openjdk-6 (6b17-1.7-1ubuntu1) lucid; urgency=low
++
++  * ARM Thumb2 updates.
++  * Test build using Hotspot hs14 on ix86.
++
++ -- Matthias Klose Sun, 31 Jan 2010 21:56:59 +0100
++
++openjdk-6 (6b17-1.7-1) unstable; urgency=low
++
++  * Upload to unstable.
++
++ -- Matthias Klose Wed, 27 Jan 2010 23:44:47 +0100
++
++openjdk-6 (6b17-1.7-0ubuntu1) lucid; urgency=low
++
++  * IcedTea6 1.7 release.
++  * Don't try to load libjpeg7; still building with libjpeg62. Closes: #563999.
++  * Run the testsuite on sh4.
++  * Ubuntu only: Implement an execute bit checker for the Non-Exec Policy.
++    - debian/JB-java.desktop.in: update mime handler to use new launcher.
++  * armel: Apply the thumb2 patches from the trunk, plus proposed patches
++    for the trunk.
++
++ -- Matthias Klose Wed, 27 Jan 2010 22:48:24 +0100
++
++openjdk-6 (6b17-0ubuntu1) lucid; urgency=low
++
++  * Build from the IcedTea6-1.7 branch.
++  * Don't build the plugin on sparc64.
++  * Enable the NPPlugin.
++  * Add support for SH4 (Nobuhiro Iwamatsu).
++  * Fix crash in the ARM assembler interpreter (Edward Nevill).
++
++ -- Matthias Klose Wed, 06 Jan 2010 15:52:50 +0100
++
++openjdk-6 (6b17~pre3-1ubuntu2) lucid; urgency=low
++
++  * Update IcedTea build infrastructure (20091224).
++  * Explicitly build-depend on x11-xkb-utils (xkbcomp is needed by
++    xvfb-run).
++
++ -- Matthias Klose Thu, 24 Dec 2009 12:43:00 +0100
++
++openjdk-6 (6b17~pre3-1ubuntu1) lucid; urgency=low
++
++  * Upload to lucid.
++
++ -- Matthias Klose Fri, 18 Dec 2009 10:40:05 +0100
++
++openjdk-6 (6b17~pre3-1) unstable; urgency=low
++
++  * Update IcedTea build infrastructure (20091218).
++  * Install docs into the openjdk-6-jre-headless directory instead of
++    openjdk-6-jre.
++
++ -- Matthias Klose Fri, 18 Dec 2009 10:00:08 +0100
++
++openjdk-6 (6b17~pre2-1ubuntu1) lucid; urgency=low
++
++  * Update IcedTea build infrastructure (20091215).
++  * Fix cacao build on armel with current optimization defaults.
++
++ -- Matthias Klose Tue, 15 Dec 2009 16:41:12 +0100
++
++openjdk-6 (6b17~pre2-1) unstable; urgency=low
++
++  * Upload to unstable.
++
++ -- Matthias Klose Mon, 09 Nov 2009 21:50:52 +0100
++
++openjdk-6 (6b17~pre2-0ubuntu3) lucid; urgency=low
++
++  * Security updates:
++    - (CVE-2009-3728) ICC_Profile file existence detection information leak
++      (6631533).
++    - (CVE-2009-3885) BMP parsing DoS with UNC ICC links (6632445).
++    - (CVE-2009-3881) resurrected classloaders can still have children
++      (6636650).
++    - (CVE-2009-3882) Numerous static security flaws in Swing (findbugs)
++      (6657026).
++    - (CVE-2009-3883) Mutable statics in Windows PL&F (findbugs) (6657138).
++    - (CVE-2009-3880) UI logging information leakage (6664512).
++    - (CVE-2009-3879) GraphicsConfiguration information leak (6822057).
++    - (CVE-2009-3884) zoneinfo file existence information leak (6824265).
++    - (CVE-2009-2409) deprecate MD2 in SSL cert validation (Kaminsky) (6861062).
++    - (CVE-2009-3873) JPEG Image Writer quantization problem (6862968).
++    - (CVE-2009-3875) MessageDigest.isEqual introduces timing attack
++      vulnerabilities (6863503).
++    - (CVE-2009-3876, CVE-2009-3877) OpenJDK ASN.1/DER input stream parser
++      denial of service (6864911).
++    - (CVE-2009-3869) JRE AWT setDifflCM stack overflow (6872357).
++    - (CVE-2009-3874) ImageI/O JPEG heap overflow (6874643).
++    - (CVE-2009-3871) JRE AWT setBytePixels heap overflow (6872358).
++  * Update IcedTea build infrastructure (20091109).
++  * Use hs16 on armel.
++
++ -- Matthias Klose Mon, 09 Nov 2009 17:48:43 +0100
++
++openjdk-6 (6b17~pre2-0ubuntu2) lucid; urgency=low
++
++  * Don't use hs16 on armel and sparc.
++
++ -- Matthias Klose Mon, 02 Nov 2009 15:33:00 +0100
++
++openjdk-6 (6b17~pre2-0ubuntu1) lucid; urgency=low
++
++  * New code drop (b17).
++  * Bump hotspot to hs16.
++  * Update IcedTea build infrastructure (20091031).
++  * Set priority of default -jre and -jdk packages to optional.
++  * Fix binary-all to binary-any dependencies. Closes: #550680.
++
++ -- Matthias Klose Sat, 31 Oct 2009 19:30:54 +0100
++
++openjdk-6 (6b16-1.6.1-2) unstable; urgency=medium
++
++  * Build-depend on xulrunner-dev (>= 1.9.1.3-3).
++
++ -- Matthias Klose Sun, 11 Oct 2009 21:34:48 +0200
++
++openjdk-6 (6b16-1.6.1-1ubuntu3) karmic; urgency=low
++
++  [Matthias Klose]
++  * On armel and powerpc, build an additional VM using shark in the
++    openjdk-6-jre-zero package (java -shark). Requires llvm-2.6.
++  * Hide the desktop menu entry for WebStart. LP: #222180.
++  * Don't provide java-virtual-machine anymore.
++
++  [Edward Nevill]
++  * Avoid stack overflows in the arm interpreter.
++
++ -- Matthias Klose Thu, 08 Oct 2009 12:41:46 +0200
++
++openjdk-6 (6b16-1.6.1-1ubuntu2) karmic; urgency=low
++
++  * Support PKCS11 cryptography via NSS, now allowing import of all
++    certificates from ca-certificates.
++  * Remove Michael Koch from uploaders, on his own request.
++  * Add the doc dir symlink for openjdk-6-jre-zero when the package
++    is built with shark support.
++
++ -- Matthias Klose Mon, 28 Sep 2009 21:55:08 +0200
++
++openjdk-6 (6b16-1.6.1-1ubuntu1) karmic; urgency=low
++
++  * Fix dependency on the java bridge packages.
++  * debian/rules: Conditionalize stuff so that the recent release
++    is never mentioned.
++  * Remove obsolete patches in debian/patches.
++  * Rebuild on armel to fix up libffi for the soft-float ABI.
++  * For jaunty builds, fix IcedTeaPlugin failure to start with xulrunner 1.9.1
++    (LP: #359407).
++    - debian/patches/icedtea-plugin-use-runtime-nsIProcess-IID.diff: Add.
++    - debian/rules: Apply it for jaunty builds.
++  * Use pulseaudio as default service provider for
++    javax.sound.midi.MidiSystem and javax.sound.sampled.AudioSystem.
++    LP: #407299.
++
++ -- Matthias Klose Sat, 26 Sep 2009 16:01:48 +0200
++
++openjdk-6 (6b16-1.6.1-1) unstable; urgency=low
++
++  * Upload to Debian unstable.
++
++ -- Matthias Klose Tue, 15 Sep 2009 02:17:03 +0200
++
++openjdk-6 (6b16-1.6.1-0ubuntu1) karmic; urgency=low
++
++  * Update IcedTea6 to the 1.6.1 release.
++  * Work around GCC PR target/41327, build the JDK on s390 with -O2.
++
++ -- Matthias Klose Thu, 10 Sep 2009 01:55:08 +0200
++
++openjdk-6 (6b16-1.6-1) unstable; urgency=low
++
++  * Update IcedTea6 to the 1.6 release.
++  * Fix GCC build dependencies.
++
++ -- Matthias Klose Wed, 09 Sep 2009 22:18:38 +0200
++
++openjdk-6 (6b16-1.6~pre2-1) unstable; urgency=low
++
++  * Upload to unstable.
++
++ -- Matthias Klose Fri, 28 Aug 2009 19:04:31 +0200
++
++openjdk-6 (6b16-1.6~pre2-0ubuntu1) karmic; urgency=low
++
++  * Update IcedTea from the 1.6 release branch:
++    - Fix buffer overflow in debugger's socket handler (Kees Cook).
++      https://bugs.openjdk.java.net/show_bug.cgi?id=100103. LP: #409736.
++    - plugin fixes.
++  * Move the pulseaudio recommendation to a suggestion, don't build-depend
++    on pulseaudio. Closes: #539394. LP: #361408.
++  * Build for armv6 (on armel).
++
++  [ Kees Cook ]
++  * debian/rules: Re-enable fortification and stack protector
++    (LP: #330713).
++  * Add stack markings to the x86 assembly to avoid an executable
++    stack. LP: #419018.
++
++ -- Matthias Klose Fri, 28 Aug 2009 18:51:34 +0200
++
++openjdk-6 (6b16-1.6~pre1-0ubuntu1) karmic; urgency=low
++
++  * Test build (icedtea6-1.6 release branch).
++
++ -- Matthias Klose Fri, 21 Aug 2009 19:44:42 +0200
++
++openjdk-6 (6b16~pre5-0ubuntu2) karmic; urgency=low
++
++  * Add explicit build dependency on libgtk2.0-dev.
++
++ -- Matthias Klose Thu, 06 Aug 2009 11:39:14 +0200
++
++openjdk-6 (6b16~pre5-0ubuntu1) karmic; urgency=low
++
++  * Bump hotspot to hs14b16.
++  * Update IcedTea build infrastructure (20090805).
++  * patches/java-access-bridge-security.patch: Update.
++  * Build-depend on xulrunner-dev instead of xulrunner-1.9-dev on karmic.
++  * Don't recommend the CJK fonts anymore, just suggest them; the appropriate
++    fonts are installed as dependencies of the language packs.
++
++ -- Matthias Klose Thu, 06 Aug 2009 10:27:09 +0200
++
++openjdk-6 (6b16~pre4-0ubuntu7) karmic; urgency=low
++
++  * Build using GCC-4.4 on sparc as well, require 4.4.1.
++
++ -- Matthias Klose Thu, 23 Jul 2009 18:23:14 +0200
++
++openjdk-6 (6b16~pre4-0ubuntu6) karmic; urgency=low
++
++  * Fix build failure building the zero VM.
++
++ -- Matthias Klose Thu, 16 Jul 2009 09:49:36 -0400
++
++openjdk-6 (6b16~pre4-0ubuntu5) karmic; urgency=low
++
++  [Matthias Klose]
++  * Update IcedTea build infrastructure (20090715).
++  * Tighten build dependency on llvm-dev.
++
++  [Edward Nevill]
++  * Add armv4 compatibility.
++
++ -- Matthias Klose Wed, 15 Jul 2009 15:40:44 -0400
++
++openjdk-6 (6b16~pre4-0ubuntu4) karmic; urgency=low
++
++  [Edward Nevill]
++  * Added Bytecode Interpreter Generator.
++  * Added ARM templates for above.
++  * Removed old optimised ARM assembler.
++  * Added -g0 because of problems with ld linking -g.
++  * Changed alignment to 64 now that the as bug is fixed.
++
++  [Matthias Klose]
++  * Update IcedTea build infrastructure (20090710).
++  * Let the -jre package depend on the access-bridge package, not the
++    -jre-headless package. LP: #395074.
++  * Suggested by Ed Nevill:
++    - Pass -timeout:3 when running the jtreg testsuite on zero architectures.
++    - Pass -Xmx256M -vmoption:-Xmx256M on armel for the jtreg testsuite run.
++  * Tighten build dependency on llvm-dev.
++
++ -- Matthias Klose Fri, 03 Jul 2009 18:32:50 +0200
++
++openjdk-6 (6b16~pre4-0ubuntu3) karmic; urgency=low
++
++  * Update zero-port-opt patch on armel.
++
++ -- Matthias Klose Wed, 24 Jun 2009 10:48:48 +0200
++
++openjdk-6 (6b16~pre4-0ubuntu2) karmic; urgency=low
++
++  * Update IcedTea build infrastructure (20090623).
++  * Reapply the zero-port-opt patch on armel.
++  * Do not use the IPA Mona font family by default. Closes: #521233.
++  * Build cacao with -fno-strict-aliasing.
++
++ -- Matthias Klose Tue, 23 Jun 2009 16:23:38 +0200
++
++openjdk-6 (6b16-4) unstable; urgency=medium
++
++  * Build the zero binary package when building with shark.
++  * Build-depend on cpio. Closes: #532963.
++
++ -- Matthias Klose Tue, 16 Jun 2009 07:52:19 +0200
++
++openjdk-6 (6b16-3) unstable; urgency=low
++
++  * Update IcedTea build infrastructure (20090612).
++  * Install the libaccess-bridge-java* symlinks again.
++  * Build zero on ix86 architectures with JIT support (shark). To use the zero
++    build without shark, use the `-Xint' option to operate in interpreted-only
++    mode.
++
++ -- Matthias Klose Fri, 12 Jun 2009 17:31:34 +0200
++
++openjdk-6 (6b16-2) unstable; urgency=low
++
++  * Don't install libaccess-bridge-java* symlinks until
++    libaccess-bridge-java-jni is available on all architectures.
++  * Add missing build dependency on cacao-source.
++
++ -- Matthias Klose Mon, 18 May 2009 14:02:59 +0200
++
++openjdk-6 (6b16-1) unstable; urgency=low
++
++  * Upload to unstable, based on 6b16 and IcedTea 1.5.
++
++ -- Matthias Klose Sun, 17 May 2009 23:02:46 +0200
++
++openjdk-6 (6b16~pre3-0ubuntu1) karmic; urgency=low
++
++  * Update to hotspot hs14b15.
++  * Provide symlink for libjava-access-bridge-jni.so. LP: #375347.
++
++ -- Matthias Klose Fri, 15 May 2009 00:41:24 +0200
++
++openjdk-6 (6b16~pre2-0ubuntu3) karmic; urgency=low
++
++  * Update IcedTea build infrastructure (20090513).
++  * Fix build failure when xvfb-run doesn't work, trying to access a
++    non-existing directory.
++
++ -- Matthias Klose Wed, 13 May 2009 23:01:23 +0200
++
++openjdk-6 (6b16~pre2-0ubuntu2) karmic; urgency=low
++
++  * Add libffi-dev as an architecture-independent build dependency.
++
++ -- Matthias Klose Mon, 11 May 2009 08:41:42 +0200
++
++openjdk-6 (6b16~pre2-0ubuntu1) karmic; urgency=low
++
++  * Update to re-tagged code drop (b16).
++  * Update IcedTea build infrastructure (20090510).
++  * Remove patches integrated in IcedTea.
++  * Remove GCJ Web Plugin support.
++  * Remove build infrastructure to build additional VMs, integrated
++    in IcedTea.
++  * Stop building the openjdk-6-source-files package.
++  * README.Debian: Document using the different VMs.
++  * Use GCC-4.3 on sparc, ICE with GCC-4.4.
++  * Fix problem with the ARM assembler interpreter when executing a 'new'
++    bytecode with a double on the top of the stack (Edward Nevill).
++  * Run the testsuite for the zero build on ix86 architectures.
++
++ -- Matthias Klose Sun, 10 May 2009 23:37:42 +0200
++
++openjdk-6 (6b16~pre1-0ubuntu1) karmic; urgency=low
++
++  * New code drop (b16).
++  * Update IcedTea build infrastructure (20090429).
++  * Merge changes from 6b14-1.4.1.
++  * Fix section names (using the java section).
++  * Remove all UTF-8 sequence definitions from the font configuration.
++
++ -- Matthias Klose Wed, 29 Apr 2009 11:33:07 +0200
++
++openjdk-6 (6b14-1.4.1-0ubuntu9) jaunty; urgency=low
++
++  * Reenable the testsuite (revert the change in the last upload).
++  * Apply fix for the ARM bytecode interpreter (Edward Nevill).
++
++ -- Matthias Klose Sat, 18 Apr 2009 11:24:27 +0200
++
++openjdk-6 (6b14-1.4.1-0ubuntu7) jaunty; urgency=low
++
++  * Don't use some Indian fonts with diverging font metrics for the
++    latin-1.UTF-8 encoding. LP: #289784.
++  * Disable running the testsuite for this build (no code changes compared
++    to the previous upload).
++
++ -- Matthias Klose Tue, 14 Apr 2009 11:46:25 +0200
++
++openjdk-6 (6b14-1.4.1-0ubuntu6) jaunty; urgency=low
++
++  * Fix native2ascii jdk test case, which made the jdk testsuite fail.
++
++ -- Matthias Klose Fri, 10 Apr 2009 11:58:44 +0200
++
++openjdk-6 (6b14-1.4.1-0ubuntu5) jaunty; urgency=low
++
++  [Matthias Klose]
++  * Update to the final CACAO 0.99.4 release.
++  * Security Vulnerability Fixes for OpenJDK:
++    - 6522586: Enforce limits on Font creation.
++    - 6536193: flaw in UTF8XmlOutput.
++    - 6610888: Potential use of cleared or incorrect acc in JMX Monitor.
++    - 6610896: JMX Monitor handles thread groups incorrectly.
++    - 6630639: lightweight HttpServer leaks file descriptors on no-data
++      connections.
++    - 6632886: Font.createFont can be persuaded to leak temporary files.
++    - 6636360: compiler/6595044/Main.java test fails with 64bit java on
++      solaris-sparcv9 with SIGSEGV.
++    - 6652463: MediaSize constructors allow to redefine the mapping of
++      standard MediaSizeName values.
++    - 6652929: Font.createFont(int,File) trusts File.getPath.
++    - 6656633: getNotificationInfo methods static mutable (findbugs).
++    - 6658158: Mutable statics in SAAJ (findbugs).
++    - 6658163: txw2.DatatypeWriter.BUILDIN is a mutable static (findbugs).
++    - 6691246: Thread context class loader can be set using JMX remote
++      ClientNotifForwarded.
++    - 6717680: LdapCtx does not close the connection if initialization fails.
++    - 6721651: Security problem with out-of-the-box management.
++    - 6737315: LDAP serialized data vulnerability.
++    - 6792554: Java JAR Pack200 header checks are insufficient.
++    - 6804996: JWS PNG Decoding Integer Overflow [V-flrhat2ln8].
++    - 6804997: JWS GIF Decoding Heap Corruption [V-r687oxuocp].
++    - 6804998: JRE GIF Decoding Heap Corruption [V-y6g5jlm8e1].
++  * Add security patch for the lcms library.
++  * Add accessibility patches java-access-bridge-security.patch and
++    accessible-toolkit.patch.
++  * Merge fixes for testsuite failures from the IcedTea6 branch.
++  * Merge the proxy support for the plugin from the IcedTea6 branch.
++  * Merge http://icedtea.classpath.org/hg/release/icedtea6-1.4.1/rev/546ef0cdee06
++    (TJ). LP: #344705.
++  * Add a Xb-Npp-Description for the IcedTea plugin. LP: #272010.
++
++  [Edward Nevill]
++  * Put VFP back in - selects VFP / novfp automatically.
++  * More assembler optimisations.
++
++ -- Matthias Klose Thu, 09 Apr 2009 14:14:12 +0200
++
++openjdk-6 (6b14-1.4.1-0ubuntu4) jaunty; urgency=low
++
++  * Disable the additional zero JVM on sparc.
++  * patches/hotspot/default/icedtea-includedb.patch: Add missing include in
++    openjdk/hotspot/src/share/vm/includeDB_core.
++  * Fix build failure of the zero VM on lpia with a fixed GCC.
++
++ -- Matthias Klose Fri, 13 Mar 2009 16:39:27 +0100
++
++openjdk-6 (6b14-1.4.1-0ubuntu3) jaunty; urgency=low
++
++  * Build the Zero/Shark VM as an additional JVM (call as `java -zero').
++
++ -- Matthias Klose Sat, 07 Mar 2009 20:54:28 +0100
++
++openjdk-6 (6b14-1.4.1-0ubuntu2) jaunty; urgency=low
++
++  * Fix zero-port-opt patching (build failure on armel).
++
++ -- Matthias Klose Thu, 05 Mar 2009 18:19:18 +0100
++
++openjdk-6 (6b14-1.4.1-0ubuntu1) jaunty; urgency=low
++
++  * Update IcedTea to the 1.4.1 release.
++  * Build the Cacao VM as an additional JVM (call as `java -cacao').
++  * Build in separate build directory.
++
++ -- Matthias Klose Thu, 05 Mar 2009 15:16:02 +0100
++
++openjdk-6 (6b14-1.5~pre1-5) unstable; urgency=low
++
++  * Fix build failure on armel.
++  * Require the final cacao-0.99.4 release.
++  * Add /usr/lib/jni to the library path. Closes: #517338.
++
++ -- Matthias Klose Tue, 17 Mar 2009 09:37:20 +0100
++
++openjdk-6 (6b14-1.5~pre1-4) unstable; urgency=low
++
++  * Disable the additional zero JVM on sparc.
++  * Fix casts in hotspot on s390. Closes: #518823.
++  * Add special flags for javac on s390 to work around a VM problem with bad
++    code generation during inlining.
++  * Run the testsuites for the default VM on all architectures.
++  * Update IcedTea (20090314).
++
++ -- Matthias Klose Sat, 14 Mar 2009 18:29:49 +0100
++
++openjdk-6 (6b14-1.5~pre1-3) unstable; urgency=low
++
++  * Don't configure the additional zero JVM with --enable-shark, currently
++    fails to build.
++  * Don't build the JDK when building the additional zero JVM.
++
++ -- Matthias Klose Sat, 07 Mar 2009 21:11:16 +0100
++
++openjdk-6 (6b14-1.5~pre1-2) experimental; urgency=low
++
++  * Build the Zero/Shark VM as an additional JVM (call as `java -zero').
++
++ -- Matthias Klose Fri, 06 Mar 2009 11:16:07 +0100
++
++openjdk-6 (6b14-1.5~pre1-0ubuntu1) jaunty; urgency=low
++
++  * Update hotspot to 14.0-b10.
++  * Update IcedTea (20090305).
++  * Build the Cacao VM as an additional JVM (call as `java -cacao').
++  * Build in separate build directory.
++
++ -- Matthias Klose Thu, 05 Mar 2009 15:16:02 +0100
++
++openjdk-6 (6b14-0ubuntu17) jaunty; urgency=low
++
++  [ Edward Nevill ]
++  * Remove VFP from asm loop.
++  * Disable the mauve testsuite for armel.
++
++  [Matthias Klose]
++  * Update IcedTea (20090302).
++
++ -- Matthias Klose Mon, 02 Mar 2009 16:18:01 +0100
++
++openjdk-6 (6b14-0ubuntu16) jaunty; urgency=low
++
++  * Regenerate auto files.
++
++ -- Matthias Klose Wed, 18 Feb 2009 15:20:10 +0100
++
++openjdk-6 (6b14-0ubuntu15) jaunty; urgency=low
++
++  [ Edward Nevill ]
++  * Added ARM assembler interpreter loop.
++  * mauve and jtreg removed again for alpha release.
++
++  [Matthias Klose]
++  * Update IcedTea (20090218).
++
++ -- Matthias Klose Wed, 18 Feb 2009 13:35:02 +0100
++
++openjdk-6 (6b14-0ubuntu14) jaunty; urgency=low
++
++  * Configure with --disable-nio2 on armel as well.
++
++ -- Matthias Klose Fri, 13 Feb 2009 10:13:55 +0100
++
++openjdk-6 (6b14-0ubuntu13) jaunty; urgency=low
++
++  * Really configure with --disable-nio2.
++
++ -- Matthias Klose Thu, 12 Feb 2009 17:26:43 +0100
++
++openjdk-6 (6b14-0ubuntu12) jaunty; urgency=low
++
++  * Configure with --disable-nio2.
++  * Run mauve and jtreg testsuites on armel.
++
++ -- Matthias Klose Wed, 11 Feb 2009 13:00:15 +0100
++
++openjdk-6 (6b14-0ubuntu11) jaunty; urgency=low
++
++  [Edward Nevill]
++  * Performance improvements for the zero build (currently applied
++    on armel only).
++
++  [Matthias Klose]
++  * Update IcedTea (20090210).
++
++ -- Matthias Klose Tue, 10 Feb 2009 15:29:22 +0100
++
++openjdk-6 (6b14-0ubuntu10) jaunty; urgency=low
++
++  * Explicitly remove PulseAudioMuteControl.java.
++
++ -- Matthias Klose Mon, 26 Jan 2009 22:09:21 +0100
++
++openjdk-6 (6b14-0ubuntu9.1) jaunty; urgency=low
++
++  * Test build.
++
++ -- Matthias Klose Sun, 25 Jan 2009 17:03:51 +0100
++
++openjdk-6 (6b14-0ubuntu9) jaunty; urgency=low
++
++  * Update IcedTea build infrastructure (20090125).
++  * Run the jtreg testsuite for cacao builds, save all test logs.
++
++ -- Matthias Klose Sun, 25 Jan 2009 15:40:52 +0100
++
++openjdk-6 (6b14-0ubuntu8) jaunty; urgency=low
++
++  * Fix merge error, don't apply patches twice.
++
++ -- Matthias Klose Mon, 19 Jan 2009 01:15:19 +0100
++
++openjdk-6 (6b14-0ubuntu7) jaunty; urgency=low
++
++  * debian/rules: Call dh_desktop. LP: #309999.
++  * Add dependency on libjpeg62 for the -headless package. LP: #318383.
++  * Test some jtreg tests which fail in samevm mode in a separate VM.
++  * Build all with -fno-stack-protector -U_FORTIFY_SOURCE.
++  * Include jtr files of failed tests in the -jdk package.
++
++ -- Matthias Klose Mon, 19 Jan 2009 00:23:22 +0100
++
++openjdk-6 (6b14-0ubuntu6) jaunty; urgency=low
++
++  * The -source package now contains all source files. Closes: #504568.
++  * The 6b14 build fixes the following security-related issues (sun bug number,
++    CVE, description):
++    - 6588160, CVE-2008-5348: jaas krb5 client leaks OS-level UDP sockets.
++    - 6497740, CVE-2008-5349: Limit the size of RSA public keys.
++    - 6484091, CVE-2008-5350: FileSystemView leaks directory info.
++    - 4486841, CVE-2008-5351: UTF-8 decoder needed to adhere to Unicode 3.0.1
++      fixes.
++    - 6755943, CVE-2008-5352: JAR decompression should enforce stricter header
++      checks.
++    - 6734167, CVE-2008-5353: Calendar.readObject allows elevation of
++      privileges.
++    - 6733959, CVE-2008-5354: Insufficient checks for "Main-Class" manifest
++      entry in JAR files.
++    - 6751322, CVE-2008-5356: Sun Java JRE TrueType Font Parsing Heap Overflow.
++    - 6733336, CVE-2008-5357: Crash on malformed font.
++    - 6766136, CVE-2008-5358: corrupted gif image may cause crash in java
++      splashscreen library.
++    - 6726779, CVE-2008-5359: ConvolveOp on USHORT raster can cause a JVM
++      crash.
++    - 6721753, CVE-2008-5360: File.createTempFile produces guessable file
++      names.
++    - 6592792: Add com.sun.xml.internal to the "package.access" property in
++      $JAVA_HOME/lib/security/java.security.
++  * Regenerate the control file.
++
++ -- Matthias Klose Tue, 13 Jan 2009 15:04:36 +0100
++
++openjdk-6 (6b14-0ubuntu2~ppa1) intrepid; urgency=low
++
++  * Upload to the OpenJDK PPA for intrepid.
++
++ -- Matthias Klose Tue, 13 Jan 2009 13:28:38 +0100
++
++openjdk-6 (6b14-0ubuntu5) jaunty; urgency=low
++
++  * Update IcedTea build infrastructure (20090113).
++  * Rebuild with updated nss/nspr packages.
++  * Update debug patches.
++  * debian/patches/nonreparenting-wm.diff: If the _JAVA_AWT_WM_NONREPARENTING
++    environment variable is set, disable all workarounds causing blank windows
++    in non-reparenting window managers (Bernhard R. Link). Closes: #508650.
++  * Fix location of the plugin for Debian. Closes: #505726.
++  * Let the -jdk package provide java-compiler. Closes: #507641.
++
++ -- Matthias Klose Tue, 13 Jan 2009 10:18:44 +0100
++
++openjdk-6 (6b14-0ubuntu4) jaunty; urgency=low
++
++  * Use a default jvm.cfg not only for java, but for all jdk tools.
++    LP: #288616.
++  * Update IcedTea build infrastructure (20081223).
++
++ -- Matthias Klose Tue, 23 Dec 2008 09:24:23 +0100
++
++openjdk-6 (6b14-0ubuntu3) jaunty; urgency=low
++
++  * Update IcedTea build infrastructure (20081217).
++  * Add support for PARISC.
++  * Use a default jvm.cfg if the jvm.cfg doesn't yet exist after unpacking
++    the runtime package. LP: #288616.
++
++ -- Matthias Klose Wed, 17 Dec 2008 09:58:26 +0100
++
++openjdk-6 (6b14~pre1-0ubuntu2) jaunty; urgency=low
++
++  * Install hotspot tarball in the -source-files package.
++  * Fix build failure on sparc.
++
++ -- Matthias Klose Fri, 05 Dec 2008 09:43:51 +0100
++
++openjdk-6 (6b14~pre1-0ubuntu1) jaunty; urgency=low
++
++  * New code drop (b14).
++  * Update IcedTea build infrastructure (20081204).
++
++ -- Matthias Klose Thu, 04 Dec 2008 11:40:56 +0100
++
++openjdk-6 (6b13~pre2-0ubuntu2) jaunty; urgency=low
++
++  * Update IcedTea build infrastructure (20081203).
++    - Fix build failures on zero-based architectures.
++  * Add build dependency on libxrender-dev.
++  * Don't use fastjar on ia64, working around a build failure.
++
++ -- Matthias Klose Wed, 03 Dec 2008 18:43:23 +0100
++
++openjdk-6 (6b13~pre2-0ubuntu1) jaunty; urgency=low
++
++  * Update IcedTea build infrastructure (20081202).
++    - Add Dec 3 OpenJDK security patches.
++  * Build with --with-hotspot-build, update patches for this config.
++
++ -- Matthias Klose Wed, 03 Dec 2008 01:05:54 +0100
++
++openjdk-6 (6b13~pre1-0ubuntu4) jaunty; urgency=low
++
++  * Update IcedTea build infrastructure (20081122).
++    - Fixes for the IcedTeaPlugin. LP: #300254.
++
++ -- Matthias Klose Sat, 22 Nov 2008 10:42:10 +0100
++
++openjdk-6 (6b13~pre1-0ubuntu3) jaunty; urgency=low
++
++  * Fix versioned conflict on icedtea-gcjwebplugin. LP: #284299.
++  * Update IcedTea build infrastructure (20081121).
++    - Fixes for the IcedTeaPlugin.
++  * Configure with pulseaudio in jaunty.
++
++ -- Matthias Klose Fri, 21 Nov 2008 07:46:10 +0100
++
++openjdk-6 (6b13~pre1-0ubuntu2) jaunty; urgency=low
++
++  * Update IcedTea build infrastructure (20081117).
++  * Use openjdk-6 as stage1 compiler on armel for a faster build.
++
++ -- Matthias Klose Mon, 17 Nov 2008 18:43:17 +0100
++
++openjdk-6 (6b13~pre1-0ubuntu1) jaunty; urgency=low
++
++  * New code drop (b13).
++    - In the langtools area, besides a few miscellaneous bug fixes (6760834,
++      6725036, 6657499), all the langtools regression tests now pass out of
++      the box (6728697, 6707027) and if using the most recent version of
++      jtreg, the langtools regression tests can be run in the much faster
++      "same vm" mode of jtreg, enabled with the -s option (6749967, 6748541,
++      6748546, 6748601, 6759775, 6759795, 6759796, 6759996, 6760805, 6760930).
++    - Gervill update, including applying a patch from IcedTea (6758986,
++      6748247, 6748251).
++    - Publishing a few dozen additional existing regression tests as open
++      source (6601457, 6759433, 6740185).
++    - JMX and monitoring fixes (6651382, 6616825, 6756202, 6754672).
++    - Man page updates (6757036, 6392810, 6504867, 6326773).
++    - Assorted other fixes (6746055, 6621697, 6756569, 6356642, 6761678).
++  * Update IcedTea build infrastructure (20081111).
++    - Fix freeze in midi app, LP: #275672.
++    - Fixes in the IcedTeaPlugin: LP: #282570, LP: #282570, LP: #285729,
++      LP: #291377, LP: #37330, LP: #239533.
++    - Fix vertical text metrics with freetype scaler. LP: #289784.
++  * Build-depend on ecj-gcj instead of ecj on architectures using
++    gij/gcj as the bootstrap system.
++  * Fixed in 6b12: Stack overflow running Kea. LP: #244731.
++
++ -- Matthias Klose Tue, 11 Nov 2008 12:39:16 +0100
++
++openjdk-6 (6b12-1~exp1) experimental; urgency=low
++
++  * Update IcedTea build infrastructure (20081025).
++
++ -- Matthias Klose Sat, 25 Oct 2008 23:35:14 +0200
++
++openjdk-6 (6b12-0ubuntu6) intrepid; urgency=low
++
++  * Make the dependency on ca-certificates-java unversioned.
++  * Merge from IcedTea:
++    - plugin/icedtea/netscape/javascript/JSObject.java: Make
++      long constructor public.
++
++ -- Matthias Klose Fri, 24 Oct 2008 23:06:32 +0200
++
++openjdk-6 (6b12-0ubuntu5) intrepid; urgency=low
++
++  * Update IcedTea build infrastructure (20081024).
++    - Add --pkgversion= configure option.
++    - IcedTeaPlugin fixes.
++    - Fix xjc regressions.
++  * openjdk-jre-headless: Depend on ca-certificates-java.
++  * Configure with --pkgversion= to encode the package
++    version in the -version output and in vm dumps.
++  * cacao: Handle VM options Xverify:all and Xverify:none.
++
++ -- Matthias Klose Fri, 24 Oct 2008 13:39:29 +0200
++
++openjdk-6 (6b12-0) experimental; urgency=low
++
++  * Upload to experimental.
++  * Pretend the availability of at least 384MB RAM; better swap on the
++    buildds than failing the build.
++
++ -- Matthias Klose Sun, 19 Oct 2008 11:15:28 +0200
++
++openjdk-6 (6b12-0ubuntu4) intrepid; urgency=low
++
++  * Update IcedTea build infrastructure (20081019).
++    - plugin fix (Make applet size factors doubles instead of ints).
++  * Don't fail the build when the jtreg summary is missing.
++  * openjdk-6-source-files: Fix priority and section of the binary package.
++  * Fix section of the plugin package.
++
++ -- Matthias Klose Sun, 19 Oct 2008 09:29:03 +0200
++
++openjdk-6 (6b12-0ubuntu3) intrepid; urgency=low
++
++  * Update IcedTea build infrastructure (20081018).
++    - Fix LiveConnect issues in the web plugin. LP: #282762.
++    - Fail the build if patches don't apply.
++  * Show xvfb and xauth failures in the build log when running the testsuites.
++  * Kill processes which still hang after running the testsuite. Closes: #493339.
++  * Run the testsuite in parallel, reducing build time.
++  * openjdk-headless: Depend on tzdata-java instead of recommending it.
++
++ -- Matthias Klose Sat, 18 Oct 2008 17:15:14 +0200
++
++openjdk-6 (6b12-0ubuntu2) intrepid; urgency=low
++
++  * icedtea6-plugin: Versioned conflict with icedtea-gcjwebplugin. LP: #184299.
++  * Don't configure --with-alt-jar=/usr/bin/fastjar on hotspot archs
++    and cacao builds. Working around a problem generating rt.jar. Manually
++    add the netscape/javascript files in zero builds.
++  * Update IcedTea build infrastructure (20081017).
++    - configury updates.
++    - IcedTeaPlugin update.
++  * openjdk-6-jdk: Suggest visualvm.
++  * Remove cacao patches found in cacao 0.99.4~20081012.
++
++ -- Matthias Klose Fri, 17 Oct 2008 13:25:45 +0200
++
++openjdk-6 (6b12-0ubuntu1) intrepid; urgency=low
++
++  * Update IcedTea to the 1.3.0 release.
++  * Apply upstream patch to fix upstream issue 6758986.
++
++ -- Matthias Klose Wed, 15 Oct 2008 19:49:05 +0000
++
++openjdk-6 (6b12~pre2-1) experimental; urgency=low
++
++  * Upload to experimental.
++
++ -- Matthias Klose Sun, 12 Oct 2008 11:16:03 +0000
++
++openjdk-6 (6b12~pre2-0ubuntu3) intrepid; urgency=low
++
++  * Update IcedTea build infrastructure (20081015).
++  * Add netscape/javascript class files to rt.jar. LP: #282762.
++  * Be more verbose in the icedtea6-plugin package description.
++  * Fix some lintian warnings.
++
++ -- Matthias Klose Wed, 15 Oct 2008 12:45:59 +0200
++
++openjdk-6 (6b12~pre2-0ubuntu2) intrepid; urgency=low
++
++  * Update IcedTea build infrastructure (20081012).
++  * When building as cacao, build a cacao-oj6-plugin package.
++  * When building as cacao, run the testsuite on amd64, i386, sparc.
++  * Add Finnish translations to the desktop files (Timo Jyrinki).
++    Closes: #494354.
++  * Fix font paths (Rob Gom). Closes: #495988.
++  * Reenable running the testsuite.
++  * Fix pulse-java build failure on amd64.
++
++ -- Matthias Klose Sun, 12 Oct 2008 13:05:49 +0200
++
++openjdk-6 (6b12~pre2-0ubuntu1) intrepid; urgency=low
++
++  * Update IcedTea build infrastructure (20081011).
++  * debian/copyright: Add copyright notice for pulseaudio sound files.
++  * Add support to build with pulseaudio.
++
++ -- Matthias Klose Sat, 11 Oct 2008 15:44:50 +0200
++
++openjdk-6 (6b12~pre1-0ubuntu5) intrepid; urgency=low
++
++  * Move the plugin from the -jre package into the -plugin package.
++
++ -- Matthias Klose Fri, 10 Oct 2008 06:55:34 +0000
++
++openjdk-6 (6b12~pre1-0ubuntu4) intrepid; urgency=low
++
++  * Update IcedTea build infrastructure (20081009).
++  * Configure with --enable-liveconnect, build an icedtea6-plugin package.
++    Thanks to Deepak Bhole.
++    LP: #207064, #195783, #199680, #202762, #220241, #242263, #242496,
++    #242496, #250292, #269885, #274356, #274360, #259313.
++  * Build an icedtea6-plugin package.
++  * Merge changes from 6b11-9 packaging.
++  * Build an openjdk-6-source-files package (used as a build dependency
++    of cacao-oj6).
++  * Build depend on cacao-source for cacao-oj6 builds.
++
++ -- Matthias Klose Thu, 09 Oct 2008 15:04:27 +0200
++
++openjdk-6 (6b12~pre1-0ubuntu3) intrepid; urgency=low
++
++  * Update IcedTea build infrastructure (20080915).
++  * Reapply: openjdk-6-jre-headless: Make libnss-mdns a suggestion
++    instead of a recommendation. LP: #261847.
++  * Build-depend on a fixed fastjar. LP: #267177.
++
++ -- Matthias Klose Mon, 15 Sep 2008 20:16:51 +0200
++
++openjdk-6 (6b12~pre1-0ubuntu2) intrepid; urgency=low
++
++  * Update the icedtea-hotspot patch to b12, fixing build failures
++    on zero archs (ia64, powerpc).
++
++ -- Matthias Klose Sat, 13 Sep 2008 11:48:44 +0200
++
++openjdk-6 (6b12~pre1-0ubuntu1) intrepid; urgency=low
++
++  * New code drop (b12).
++  * Update IcedTea build infrastructure (20080912).
++
++ -- Matthias Klose Sat, 13 Sep 2008 00:41:42 +0200
++
++openjdk-6 (6b11-6) unstable; urgency=low
++
++  * Set minimum heap size independent of available memory for cacao builds.
++  * Link the wrapper tools with -rdynamic for cacao builds.
++  * Update cacao-based builds:
++    - Update cacao to 0.99.3, remove patches applied upstream.
++    - Fix build failures on mipsel-linux.
++  * Allow setting of the bootstrap compiler per architecture.
++  * Configure --with-alt-jar set to fastjar to speed up builds.
++  * Update IcedTea build infrastructure (20080815), remove local patches
++    integrated in IcedTea.
++    - Make use of unsigned/signed types explicit.
++    - Fix PR icedtea/184, adding support for returning floats and doubles
++      for zero builds.
++    - Fix PR icedtea/181, class type checks.
++
++ -- Matthias Klose Fri, 15 Aug 2008 16:59:48 +0000
++
++openjdk-6 (6b11-5) unstable; urgency=low
++
++  * debian/rules (stamps/mauve-build): Configure with --host and --build.
++  * openjdk-6-jdk: Recommend libxt-dev (jawt_md.h header includes).
++  * Fix build issues on s390 (size_t is unsigned long).
++
++ -- Matthias Klose Sun, 03 Aug 2008 20:05:44 +0200
++
++openjdk-6 (6b11-4) unstable; urgency=low
++
++  * Update IcedTea build infrastructure (20080801).
++    - Fix typo causing build failure on mipsel.
++
++ -- Matthias Klose Fri, 01 Aug 2008 01:25:51 +0200
++
++openjdk-6 (6b11-3) unstable; urgency=low
++
++  * Include the name of the VM used in the package description.
++  * Update IcedTea build infrastructure (20080731).
++  * Build for alpha, arm, mips and mipsel.
++  * Switch from libcupsys2(-dev) to libcups2(-dev) for newer releases.
++    (Closes: #492384)
++  * Add symlinks for header files found in JAVA_HOME/include/linux in
++    JAVA_HOME/include.
++  * openjdk-6-jre: Don't recommend lesstif2 anymore.
++
++ -- Matthias Klose Thu, 31 Jul 2008 17:16:59 +0200
++
++openjdk-6 (6b11-2ubuntu1) intrepid; urgency=low
++
++  * xvfb seems to be broken when running with intrepid and an intrepid kernel.
++    Nevertheless, run xvfb-run -s "-extension GLX" on the buildds (hardy
++    kernels).
++  * Stop the buildwatch process after the testsuite finishes.
++
++ -- Matthias Klose Fri, 25 Jul 2008 12:33:44 +0200
++
++openjdk-6 (6b11-2) unstable; urgency=low
++
++  * Update IcedTea build infrastructure (20080724).
++  * debian/buildwatch.sh: Track long-building files with no visible output.
++  * Fix build failure when not running the mauve testsuite.
++  * Disable running the testsuite for cacao builds (leaving processes around).
++  * Don't set a soversion for the cacao libjvm.
++  * Configure with --host and --build.
++  * Call xvfb-run with -s "-extension GLX".
++
++ -- Matthias Klose Thu, 24 Jul 2008 22:25:30 +0200
++
++openjdk-6 (6b11-1ubuntu2) intrepid; urgency=low
++
++  * Update IcedTea build infrastructure (20080724).
++  * debian/buildwatch.sh: Track long-building files with no visible output.
++  * Fix build failure when not running the mauve testsuite.
++  * Disable running the testsuite for cacao builds (leaving processes around).
++  * Don't set a soversion for the cacao libjvm.
++  * Configure with --host and --build.
++
++ -- Matthias Klose Thu, 24 Jul 2008 17:58:53 +0200
++
++openjdk-6 (6b11-1ubuntu1) intrepid; urgency=low
++
++  * Regenerate the control file.
++
++ -- Matthias Klose Wed, 23 Jul 2008 00:42:16 +0200
++
++openjdk-6 (6b11-1) unstable; urgency=low
++
++  * New code drop (b11).
++  * Update IcedTea build infrastructure (20080721).
++  * debian/generate-dfsg.sh: Update for b11.
++  * debian/patches/const_strings.patch, debian/patches/issue-6659207.diff:
++    Remove, applied upstream.
++  * debian/generate-debian-orig.sh: Fix /proc check.
++  * debian/control.in: Loosen dependency of -jre-lib on -jre.
++  * Support `nodocs' in DEB_BUILD_OPTIONS.
++  * Remove build-dependency on lesstif2-dev.
++  * Bootstrap using gcj on all archs; the 6b10dfsg-2 packages are broken.
++  * Run the jtreg harness in a virtual X environment.
++  * Install javazic.jar in the jre-lib package.
++  * Don't run the testsuite on arm; the build already takes days; only
++    run the testsuite on hotspot archs and powerpc.
++
++ -- Matthias Klose Wed, 23 Jul 2008 00:28:12 +0200
++
++openjdk-6 (6b10dfsg-2) unstable; urgency=low
++
++  * Update IcedTea build infrastructure (20080714).
++  * On arm configure cacao builds with --enable-softfloat.
++  * Don't run the mauve testsuite for cacao builds (hangs the test runner
++    process).
++  * Don't configure cacao builds with --enable/--disable-zero.
++  * Don't remove alternatives on upgrade.
++  * debian/copyright: Add license for NanoXML.
++  * Recommend ttf-indic-fonts instead of ttf-indic-fonts-core for
++    lenny and sid. Closes: #490619.
++  * Ignore errors when registering the jar binfmt. The alternative may
++    already be registered by another JVM. Closes: #490594.
++  * openjdk-6-jre-headless: Depend on ttf-dejavu-core instead of ttf-dejavu.
++  * On amd64, i386 and sparc, bootstrap using openjdk.
++
++ -- Matthias Klose Mon, 14 Jul 2008 19:41:19 +0200
++
++openjdk-6 (6b10dfsg-1ubuntu2) intrepid; urgency=low
++
++  * Update IcedTea build infrastructure (20080702).
++    - Do not configure --with-shark by default.
++    - Update license headers from jdk7.
++  * Start debian/buildwatch.sh for armel and sparc builds as well.
++  * Allow configuration with --with-cacao.
++
++ -- Matthias Klose Mon, 30 Jun 2008 13:30:06 +0200
++
++openjdk-6 (6b10dfsg-1ubuntu1) intrepid; urgency=low
++
++  * The rebuilt upstream tarball now has GPL-compatible free software licenses
++    and documented copyrights and licenses. LP: #238569.
++  * Fixed name of the xulrunner-1.9-javaplugin.so in the .jinfo file.
++    LP: #226911.
++  * Ignore errors during activation of class data sharing. Closes: #490617,
++    #490672.
++
++ -- Matthias Klose Mon, 30 Jun 2008 13:30:06 +0200
++
++openjdk-6 (6b10dfsg-1) unstable; urgency=low
++
++  * Rebuild the upstream tarball:
++    - Remove the jscheme jar files.
++    - Apply the patch from Iris Clark (Sun) for the copyright headers
++      (http://icedtea.classpath.org/bugzilla/show_bug.cgi?id=141#c4).
++    - Remove jdk/src/share/classes/java/lang/instrument/package.html.
++    - Upload to main.
++  * Update IcedTea build infrastructure (20080628).
++  * Build an openjdk-6-dbg package.
++  * patches/gcc-mtune-generic.diff: Fix typo.
++  * openjdk-6-jre: Depend on ttf-dejavu.
++  * debian/copyright: Add two more copyright holders, not directly
++    mentioned in the third party readme.
++    Replace Apache 2.0 license with pointer to common-licenses.
++  * Fix more lintian warnings.
++  * debian/sun_java_app.xpm: Downsize icon to 32x32 pixels.
++  * Build-depend/depend on rhino.
++
++ -- Matthias Klose Sun, 29 Jun 2008 17:42:51 +0200
++
++openjdk-6 (6b10-1) unstable; urgency=low
++
++  * New code drop (b10).
++    - Still some non-compliant license headers found in
++      openjdk/corba/src/share/classes/com/sun/tools/corba/se/idl.
++    - Upload to non-free.
++  * Update IcedTea build infrastructure (20080603).
++  * In fontconfig.properties, fix Korean font names and add paths to the Luxi
++    fonts for Motif. Add fonts for Indic languages.
++  * Install compilefontconfig.jar in the openjdk-6-jre-lib package.
++  * Run the testsuites of hotspot, langtools and jdk.
++  * Include the jscheme-5.0 sources in the tarball, mention the jscheme
++    licenses in debian/copyright.
++  * Use the certificates provided by the ca-certificates-java package.
++  * More fontconfig updates (Arne Goetje).
++  * Fix some lintian warnings.
++  * Correct build-dependency for openjdk-based bootstraps.
++
++ -- Matthias Klose Wed, 04 Jun 2008 01:46:52 +0200
++
++openjdk-6 (6b09-1~pre1) unstable; urgency=low
++
++  [ Matthias Klose ]
++  * New code drop (b09).
++  * Update IcedTea build infrastructure (20080528).
++    - Add missing color profiles. LP: #225174.
++    - Moved system properties defined in hotspot to TrustManagerFactoryImpl.
++      LP: #224455.
++  * 6636469_v4.diff: Remove, committed in IcedTea.
++  * debian/control: Update Vcs-* attributes.
++  * debian/JB-jre-headless.p*.in: Fix update-binfmts calls.
++  * Compress the man pages, fixing the slave symlinks of the alternatives.
++  * javaws.desktop: Add `%u' to the Exec key, remove the -viewer option.
++  * openjdk-6-jre-headless: Recommends libnss-mdns.
++  * openjdk-6-jre-headless: Warn about unmounted /proc file system.
++  * debian/JB-jre.mime.in: Remove the -viewer option from the command (Tan Rui
++    Boon).
++  * Add a `docs' symlink pointing to /usr/share/doc/openjdk-6. LP: #218405.
++  * Set maintainer to the team list.
++  * Add copyright notices for patches and generated files.
++  * Add helper scripts to modify the upstream tarball and generate the debian
++    tarball.
++  * Fix names for browser alternatives in jinfo file, set browser_plugin_dirs
++    unconditionally.
++  * Recommend the ttf-wqy-zenhei font instead of ttf-arphic-uming, if the
++    latter is available in TrueType Collection (TTC) format only, add the
++    fontconfig changes as a patch.
++  * Make the cjk font packages configurable in the control file.
++  * Use GCC-4.3 on all platforms where available.
++  * Install a config file swing.properties, allowing a user to change
++    the default look and feel. LP: #229112.
++  * When trying to determine the executable name by reading /proc/self/exe,
++    discard known prefixes used for unionfs mounts. LP: #224110.
++  * Explicitly configure with --disable-zero on hotspot architectures.
++  * Add fix for issue 6659207, access violation in CompilerThread0.
++    Addresses #478560. Needs checking: LP: #229207.
++  * Disable building the docs on ia64, powerpc and sparc; we don't build
++    architecture-independent packages on these architectures.
++  * Explicitly configure --with-parallel-jobs, needed by the updated IcedTea.
++  * Backport the linux-sparc patches, enable building on sparc. LP: #154080.
++  * Don't use an absolute path when calling the compiler.
++  * Replace the OpenJDK version in desktop and menu files.
++  * Install menu files.
++  * Install openjdk-6-java.desktop in -jre, instead of -jre-headless.
++
++ -- Matthias Klose Wed, 14 May 2008 08:49:54 +0200
++
++openjdk-6 (6b08-1) unstable; urgency=low
++
++  [ Torsten Werner ]
++  * First upload to Debian (Closes: #452750).
++  * Regenerate debian/control.
++  * Switch to bzip2 package compression in Debian but leave lzma compression
++    in Ubuntu.
++  * Temporarily downgrade Depends: tzdata-java to Recommends until the package
++    becomes available in Debian.
++  * Add myself to Uploaders.
++  * Do not install extras license file in openjdk-6-jre-lib.
++  * Add patch shebang.diff to fix a lintian warning.
++  * Install openjdk-6-java.desktop into the correct binary package.
++  * Improve some package descriptions.
++  * Remove some empty directories from binary packages.
++  * Install README.Debian in every binary package and give it some useful
++    content.
++  * Install java-rmi.cgi in package openjdk-6-jre-headless and mention it in
++    README.Debian.
++  * Install /usr/bin/jexec via update-alternatives.
++  * Downgrade Depends: java-common (>= 0.28).
++  * Add patch jexec.diff to make the jexec binary executable without
++    specifying an absolute path.
++  * Add Build-Depends: xauth and xfonts-base for mauve.
++  * Update and install the lintian override files.
++  * Replace all occurrences of binary:Version and source:Version by
++    Source-Version to be compatible with Ubuntu release 6.06.
++  * Remove Conflicts: gcjwebplugin.
++
++  [ Michael Koch ]
++  * Fixed Vcs-Bzr and Vcs-Browser fields.
++  * Removed Encoding entry from all debian/*.desktop.in files.
++
++  [ Matthias Klose ]
++  * Make Java Full Screen Exclusive Mode work again with Xorg Server 1.3
++    and above (Dan Munckton). LP: #154613 (Java bug 6636469).
++  * Configure with --enable-zero on all archs except amd64, i386, lpia.
++  * Update IcedTea build infrastructure.
++  * Handle binary files in updates of the build infrastructure.
++  * Enable bootstrap/normal builds per architecture.
++  * javaws-wrapper.sh: Use readlink --canonicalize. LP: #211515.
++  * binfmt-support: Handle /usr/share/binfmts/jar as a slave symlink of
++    the jexec alternative, install the binfmt file in the jre libdir.
++    Use the jexec alternative in the binfmt file.
++  * JB-jre-headless.postinst.in: Remove sun-java6-specific chunks.
++  * Differentiate between the openjdk version required as a dependency and
++    as a build dependency.
++
++ -- Torsten Werner Wed, 09 Apr 2008 10:12:55 +0200
++
++openjdk-6 (6b08-0ubuntu1) hardy; urgency=low
++
++  * New code drop (b08).
++  * Update IcedTea build infrastructure.
++  * Move binfmt-support references from -jre to the -jre-headless package.
++  * Don't fail on purge if /var/lib/binfmts/openjdk-6 is missing. LP: #206721.
++  * Only use the basename for icons in desktop files. LP: #207413.
++  * Install javaws(1). LP: #191297.
++  * Install a wrapper script for javaws, which calls `javaws -viewer' if no
++    arguments are given (or else starting javaws from the desktop menu
++    would not do anything).
++  * debian/JB-web-start.applications.in: Remove the -viewer option from the
++    command.
++
++ -- Matthias Klose Fri, 28 Mar 2008 16:10:32 +0000
++
++openjdk-6 (6b07-0ubuntu1) hardy; urgency=low
++
++  * New code drop (b07).
++  * Update IcedTea build infrastructure.
++  * debian/copyright: Update to OpenJDK Trademark Notice v1.1.
++
++ -- Matthias Klose Sat, 22 Mar 2008 22:41:42 +0100
++
++openjdk-6 (6b06-0ubuntu12) hardy; urgency=low
++
++  * Update icon locations in menu files.
++  * openjdk-6-jre-headless: Provide java-virtual-machine. LP: #189953.
++  * openjdk-6-jre-headless: Add a conflict with gcjwebplugin; for openjdk
++    use icedtea-gcjwebplugin, for gij the java-gcj-compat-plugin.
++
++ -- Matthias Klose Sat, 22 Mar 2008 20:12:41 +0100
++
++openjdk-6 (6b06-0ubuntu11) hardy; urgency=low
++
++  * Update IcedTea to 20080319.
++  * Move rt.jar into the openjdk-6-jre-headless package; sun/awt/X11
++    class files differ between amd64 and i386.
++  * Install all desktop files in /usr/share/applications.
++
++ -- Matthias Klose Wed, 19 Mar 2008 23:53:36 +0100
++
++openjdk-6 (6b06-0ubuntu10) hardy; urgency=low
++
++  * Remove print-lsb-release.patch, forwarded to IcedTea.
++  * Fix IcedTea configure to detect libffi without pkg-config.
++
++ -- Matthias Klose Wed, 12 Mar 2008 20:28:55 +0100
++
++openjdk-6 (6b06-0ubuntu9) hardy; urgency=low
++
++  * Build-depend on libffi4-dev on architectures other than amd64, i386, lpia.
++  * Install icons in /usr/share/pixmaps, not /usr/share/icons.
++  * debian/rules: Call dh_icons.
++
++ -- Matthias Klose Wed, 12 Mar 2008 11:34:44 +0100
++
++openjdk-6 (6b06-0ubuntu8) hardy; urgency=low
++
++  * Tighten dependency on java-common.
++  * debian/copyright: Include trademark notice.
++  * debian/control: Mention IcedTea in the package descriptions.
++  * Update IcedTea to 20080311.
++
++ -- Matthias Klose Tue, 11 Mar 2008 21:39:27 +0100
++
++openjdk-6 (6b06-0ubuntu7) hardy; urgency=low
++
++  * Build-depend on unzip.
++
++ -- Matthias Klose Fri, 07 Mar 2008 16:47:43 +0100
++
++openjdk-6 (6b06-0ubuntu6) hardy; urgency=low
++
++  * Build-depend on zip.
++
++ -- Matthias Klose Fri, 07 Mar 2008 16:16:52 +0100
++
++openjdk-6 (6b06-0ubuntu5) hardy; urgency=low
++
++  * debian/mauve_tests: javax.swing.text.html.HTML.ElementTagAttributeTest,
++    removed; it tries to access the network.
++  * debian/README.alternatives.in: Update for --jre-headless.
++  * debian/rules: Fix paths for OpenJDK-based bootstrap.
++  * Compress packages using lzma.
++  * Drop build dependency on zip, unzip.
++  * Fix build infrastructure to bootstrap with OpenJDK instead of ecj.
++  * Do not build the gcjwebplugin from the OpenJDK source.
++
++ -- Matthias Klose Fri, 07 Mar 2008 13:53:15 +0100
++
++openjdk-6 (6b06-0ubuntu4) hardy; urgency=low
++
++  * Don't register a java-rmi.cgi alternative in /usr/bin.
++
++ -- Matthias Klose Thu, 06 Mar 2008 17:59:35 +0100
++
++openjdk-6 (6b06-0ubuntu3) hardy; urgency=low
++
++  * Split out an openjdk-6-jre-headless package, depend on java-common,
++    supporting update-java-alternatives --jre-headless.
++  * Make openjdk-6-jre-headless and openjdk-6-jre architecture any.
++  * New package openjdk-6-jre-lib (arch all).
++  * Remove openjdk-6-bin package.
++  * debian/patches/openjdk-ubuntu-branding.patch: New patch.
++  * Install images/cursors/cursors.properties as a config file.
++  * Do not compress demos and examples in the -demo package.
++  * openjdk-6-jre: Add dependency on libxinerama1.
++  * Update IcedTea to 20080305.
++  * Don't generate cacerts ourselves, but depend on ca-certificates,
++    fix the location of the javax.net.ssl.trustStore property.
++  * Build-depend on mauve and xvfb; run some mauve tests (the list of
++    tests taken from the Fedora package).
++  * Keep a backup of the `generated' directory; some files are regenerated
++    differently, increasing the size of the diff.
++
++ -- Matthias Klose Thu, 06 Mar 2008 10:05:39 +0100
++
++openjdk-6 (6b06-0ubuntu2) hardy; urgency=low
++
++  * Re-add gawk and pkg-config as build dependencies.
++
++ -- Matthias Klose Tue, 04 Mar 2008 12:20:21 +0100
++
++openjdk-6 (6b06-0ubuntu1) hardy; urgency=low
++
++ * New code drop (b06).
++ * Remove java-access-bridge tarball, use an externally built package.
++ * Update IcedTea to 20080304.
++ * Don't use any compiler flags from the environment.
++
++ -- Matthias Klose Tue, 04 Mar 2008 09:16:59 +0100
++
++openjdk-6 (6b05-0ubuntu1) hardy; urgency=low
++
++ * First public OpenJDK upstream code drop (b05).
++ * Depend on tzdata-java.
++
++ -- Matthias Klose Fri, 29 Feb 2008 19:05:42 +0100
++
++icedtea-java7 (7~b24-1.5+20080118-2) UNRELEASED; urgency=low
++
++ * Fix removal of alternatives.
++
++ -- Matthias Klose Sat, 26 Jan 2008 18:41:40 +0100
++
++icedtea-java7 (7~b24-1.5+20080118-1) hardy; urgency=low
++
++ * Fix installation of the plugin for firefox-3.0.
++
++ -- Matthias Klose Sat, 19 Jan 2008 15:10:18 +0100
++
++icedtea-java7 (7~b24-1.5+20080118-1~ppa1) hardy; urgency=low
++
++ * Update IcedTea to 20080118.
++ * Fix another build failure when gcc version != gcj version.
++ * Use the versioned compiler to build the corba parts.
++ * Register plugin for firefox-3.0.
++ * Build using GCC-4.3.
++
++ -- Matthias Klose Fri, 18 Jan 2008 21:15:08 +0100
++
++icedtea-java7 (7~b24-1.5-2) UNRELEASED; urgency=low
++
++ * First upload to Debian. Closes: #452750.
++ * debian/control.in:
++ - Moved package from universe/devel section to devel.
++ - Put icedtea-java7-doc into doc section.
++ - Added Homepage field and removed Homepage pseudo field from
++ descriptions.
++ - Updated Standards-Version to 3.7.3.
++ * debian/rules:
++ - Check if Makefile exists before calling clean in the clean target.
++ * debian/copyright: Converted to UTF-8.
++
++ -- Michael Koch Sat, 05 Jan 2008 13:34:46 -0100
++
++icedtea-java7 (7~b24-1.5-1) hardy; urgency=low
++
++ * Update to IcedTea-1.5.
++ * debian/patches/ssp-interpreter.diff: Use bash to call the ssp script;
++ backslash handling differs between dash and bash.
++
++ -- Matthias Klose Fri, 04 Jan 2008 09:58:27 +0100
++
++icedtea-java7 (7~b24-1.5~20071214-1) hardy; urgency=low
++
++ * New upstream snapshot (b24).
++ * Update java-access-bridge to 1.21.1.
++ * On powerpc keep the build alive using the buildwatch script.
++ * Do not install menu entries for javaws, ControlPanel. LP: #155553, #156673.
++ * README.alternatives: Fix example. LP: #157063.
++
++ -- Matthias Klose Fri, 14 Dec 2007 11:09:22 +0100
++
++icedtea-java7 (7~b23-1.5~20071124-4) hardy; urgency=low
++
++ * Allow libungif4-dev as an alternative build dependency.
++ * On powerpc keep the build alive using the buildwatch script.
++ * Always build the plugin using libxul-dev.
++
++ -- Matthias Klose Mon, 10 Dec 2007 15:20:16 +0100
++
++icedtea-java7 (7~b23-1.5~20071124-3) hardy; urgency=low
++
++ * Lower optimization to -O2 for building the jdk on amd64.
++ * Reenable parallel build.
++ * Link using -Bsymbolic-functions.
++ * debian/patches/arguments.patch, debian/patches/const_strings.patch:
++ New patches (Arnaud Vandyck).
++
++ -- Matthias Klose Thu, 29 Nov 2007 18:50:41 +0100
++
++icedtea-java7 (7~b23-1.5~20071124-2) hardy; urgency=low
++
++ * Bootstrap using ecj/gcj.
++ * Build using g++-4.1 on i386, lpia, powerpc.
++
++ -- Matthias Klose Wed, 28 Nov 2007 14:46:48 +0000
++
++icedtea-java7 (7~b23-1.5~20071124-1) hardy; urgency=low
++
++ * New upstream snapshot (b23).
++
++ -- Matthias Klose Tue, 27 Nov 2007 15:48:08 +0100
++
++icedtea-java7 (7~b22-1.5~20071018-0ubuntu3) hardy; urgency=low
++
++ * Revert the previous change, require 7~b22 versions again.
++
++ -- Matthias Klose Sat, 20 Oct 2007 14:33:26 +0000
++
++icedtea-java7 (7~b22-1.5~20071018-0ubuntu2) hardy; urgency=low
++
++ * Loosen dependencies to 7~b21, until the package is built on all archs.
++
++ -- Matthias Klose Sat, 20 Oct 2007 07:48:36 +0000
++
++icedtea-java7 (7~b22-1.5~20071018-0ubuntu1) hardy; urgency=low
++
++ * New upstream snapshot (b22).
++ * Don't use parallel builds, to make the build logs easier to compare.
++
++ -- Matthias Klose Fri, 19 Oct 2007 19:49:51 +0200
++
++icedtea-java7 (7~b21-1.4+20071007-0ubuntu5) gutsy; urgency=low
++
++ * Update icedtea to vcs 20071012 (1.4 release); checked that the plugin
++ works on amd64.
++ * debian/copyright: Update "License Headers" section.
++
++ -- Matthias Klose Sun, 14 Oct 2007 19:55:12 +0200
++
++icedtea-java7 (7~b21-1.4+20071007-0ubuntu4) gutsy; urgency=low
++
++ * Bootstrapping trigger upload for the final step of bootstrapping i386,
++ amd64, lpia.
++
++ -- LaMont Jones Wed, 10 Oct 2007 22:04:25 -0600
++
++icedtea-java7 (7~b21-1.4+20071007-0ubuntu3) gutsy; urgency=low
++
++ * Add build dependency on bzip2.
++
++ -- Matthias Klose Wed, 10 Oct 2007 09:00:40 +0000
++
++icedtea-java7 (7~b21-1.4+20071007-0ubuntu2) gutsy; urgency=low
++
++ * Add build dependency on ant.
++
++ -- Matthias Klose Wed, 10 Oct 2007 06:33:14 +0000
++
++icedtea-java7 (7~b21-1.4+20071007-0ubuntu1) gutsy; urgency=low
++
++ * Update icedtea to vcs 20071007.
++ * Update java-access-bridge to 1.20.0.
++ * Add build-dependency on libxinerama-dev.
++ * Add Xb-Npp-xxx tags for the -plugin package.
++ * Name the plugin "GCJ Web Browser Plugin (using IcedTea)", GCJ now
++ "Great Cool Java" (according to Michael Koch).
++ * Compress binary-indep packages using bzip2.
++
++ -- Matthias Klose Sun, 7 Oct 2007 23:43:28 +0200
++
++icedtea-java7 (7~b21-1.4+20071002-0ubuntu2) gutsy; urgency=low
++
++ * Rebuild using icedtea 7~b21.
++
++ -- Matthias Klose Tue, 02 Oct 2007 19:45:21 +0200
++
++icedtea-java7 (7~b21-1.4+20071002-0ubuntu1) gutsy; urgency=low
++
++ * New upstream snapshot (b21).
++ * Correctly unregister the executable binary format in the -bin package.
++ * Assemble the debian/copyright file.
++
++ -- Matthias Klose Tue, 2 Oct 2007 09:59:37 +0200
++
++icedtea-java7 (7~b19-1.3+20070905-0ubuntu1) gutsy; urgency=low
++
++ * New upstream snapshot (b19).
++ * Build using g++-4.2 on amd64, using g++-4.1 on i386 and lpia.
++ * Build without -Werror; the upstream source is not yet warning-clean.
++ * Support DEB_BUILD_OPTIONS=parallel=.
++ * Add build dependency on wget.
++ * Add font suggestions.
++ * Changed font configuration to fix CJK font support (Arne Goetje).
++ * Fix location of the plugin when registering the plugin alternatives.
++
++ -- Matthias Klose Thu, 6 Sep 2007 00:46:40 +0200
++
++icedtea-java7 (7~b18-1.2+20070822-0ubuntu2) gutsy; urgency=low
++
++ * Create browser plugin directories.
++ * Build-depend on liblcms1-dev.
++ * Recommend packages with dlopened libraries: liblcms1, lesstif2, libcupsys2,
++ libgtk2.0-0, libgnome2-0, libgnomevfs2-0, libgconf2-4, libxrandr2,
++ libgl1-mesa-glx.
++
++ -- Matthias Klose Tue, 21 Aug 2007 17:21:00 +0200
++
++icedtea-java7 (7~b18-1.2+20070822-0ubuntu1) gutsy; urgency=low
++
++ * New upstream version.
++ * Include java-access-bridge.
++ * Build -doc, -plugin packages.
++ * Register alternatives, priority 1060.
++
++ -- Matthias Klose Tue, 21 Aug 2007 17:21:00 +0200
++
++icedtea-java7 (7~b17-1.2+20070808-1) gutsy; urgency=low
++
++ * Initial release, discard the initial packaging based on cdbs.
++ * Base the packaging on the sun-javaX packages. ++ ++ -- Matthias Klose Wed, 08 Aug 2007 15:55:39 +0200 ++ ++icedtea (7~b16-20070724-1) UNRELEASED; urgency=low ++ ++ * Update upstream tarball to 7~b16, update icedtea to 20070724. ++ * debian/control: Build-depend on libfreetype6-dev. ++ * debian/rules: Configure --with-jar=/usr/bin/fastjar. ++ * Build using gcj-4.2. ++ ++ -- Matthias Klose Tue, 24 Jul 2007 11:24:55 +0200 ++ ++icedtea (7~b14-20070704-1) unstable; urgency=low ++ ++ * Initial version. ++ ++ -- Michael Koch Wed, 04 Jul 2007 08:25:31 -0100 ++ diff --cc debian/compat index 000000000,000000000..b4de39476 new file mode 100644 --- /dev/null +++ b/debian/compat @@@ -1,0 -1,0 +1,1 @@@ ++11 diff --cc debian/control index 000000000,000000000..2172a5d61 new file mode 100644 --- /dev/null +++ b/debian/control @@@ -1,0 -1,0 +1,181 @@@ ++Source: openjdk-17 ++Section: java ++Priority: optional ++Maintainer: OpenJDK Team ++Uploaders: Matthias Klose ++Build-Depends: debhelper (>= 11), ++ m4, lsb-release, zip, unzip, ++ sharutils, gawk, cpio, pkg-config, procps, wdiff, fastjar (>= 2:0.96-0ubuntu2), ++ time, strip-nondeterminism, debugedit (>= 4.16), ++ jtreg6 (>= 6+1-0~) , libtestng7-java , xvfb , xauth , xfonts-base , libgl1-mesa-dri [!x32] , xfwm4 , x11-xkb-utils , dbus-x11 , googletest , google-mock , xvfb , libasmtools-java , ++ autoconf, automake, autotools-dev, ant, ant-optional, ++ g++-12 , ++ openjdk-17-jdk-headless:native | openjdk-16-jdk-headless:native, ++ libxtst-dev, libxi-dev, libxt-dev, libxaw7-dev, libxrender-dev, libcups2-dev, libasound2-dev, liblcms2-dev, libfreetype6-dev (>= 2.2.1), libxinerama-dev, libkrb5-dev, xsltproc, libpcsclite-dev, libxrandr-dev, libelf-dev, libfontconfig1-dev, libgtk2.0-0 | libgtk-3-0, libharfbuzz-dev, ++ libffi-dev, libffi-dev:native, ++ zlib1g-dev:native, zlib1g-dev, libattr1-dev, libpng-dev, libjpeg-dev, libgif-dev, ++ libnss3-dev (>= 2:3.17.1), ++ openjdk-17-jdk-headless , ++Build-Depends-Indep: graphviz, pandoc, ++Rules-Requires-Root: no ++Standards-Version: 4.6.2 ++Homepage: https://openjdk.java.net/ ++Vcs-Browser: https://salsa.debian.org/openjdk-team/openjdk/tree/openjdk-17 ++Vcs-Git: https://salsa.debian.org/openjdk-team/openjdk.git -b openjdk-17 ++ ++Package: openjdk-17-jdk-headless ++Architecture: alpha amd64 armel armhf arm64 i386 ia64 mips mipsel mips64 mips64el powerpc ppc64 ppc64el m68k riscv64 sh4 sparc sparc64 s390x x32 ++Multi-Arch: same ++Pre-Depends: ${dpkg:Depends} ++Depends: openjdk-17-jre-headless (= ${binary:Version}), ++ ${shlibs:Depends}, ${misc:Depends} ++Suggests: openjdk-17-demo, openjdk-17-source ++Provides: java-sdk-headless (= ${vm:Version}), java2-sdk-headless, ++ java5-sdk-headless, java6-sdk-headless, ++ java7-sdk-headless, java8-sdk-headless, ++ java9-sdk-headless, java10-sdk-headless, ++ java11-sdk-headless, java12-sdk-headless, ++ java13-sdk-headless, java14-sdk-headless, java15-sdk-headless, ++ java16-sdk-headless, java17-sdk-headless, ++ java-compiler ++Breaks: openjdk-17-jre-headless (<< 17~10) ++Replaces: openjdk-17-jre-headless (<< 17~10) ++Description: OpenJDK Development Kit (JDK) (headless) ++ OpenJDK is a development environment for building applications, ++ applets, and components using the Java programming language. 
++ ++Package: openjdk-17-jre-headless ++Architecture: alpha amd64 armel armhf arm64 i386 ia64 mips mipsel mips64 mips64el powerpc ppc64 ppc64el m68k riscv64 sh4 sparc sparc64 s390x x32 ++Multi-Arch: same ++Pre-Depends: ${dpkg:Depends} ++Depends: ${jredefault:Depends}, ${cacert:Depends}, ++ ${jcommon:Depends}, ${dlopenhl:Depends}, ++ ${mountpoint:Depends}, ++ ${shlibs:Depends}, ${misc:Depends} ++Recommends: ${dlopenhl:Recommends} ++Suggests: libnss-mdns, ++ fonts-dejavu-extra, ++ fonts-ipafont-gothic, fonts-ipafont-mincho, fonts-wqy-microhei | fonts-wqy-zenhei, fonts-indic, ++Provides: java-runtime-headless (= ${vm:Version}), java2-runtime-headless, ++ java5-runtime-headless, java6-runtime-headless, ++ java7-runtime-headless, java8-runtime-headless, ++ java9-runtime-headless, java10-runtime-headless, ++ java11-runtime-headless, java12-runtime-headless, ++ java13-runtime-headless, java14-runtime-headless, java15-runtime-headless, ++ java16-runtime-headless, java17-runtime-headless, ++ ${defaultvm:Provides}, ${jvm:Provides} ++Breaks: ${jrehl:Breaks} ++Conflicts: oracle-java11-installer, openjdk-11-jdk-headless (<< 11~18-2) ++Replaces: openjdk-11-jdk-headless (<< 11~18-2) ++Description: OpenJDK Java runtime, using ${vm:Name} (headless) ++ Minimal Java runtime - needed for executing non GUI Java programs, ++ using ${vm:Name}. ++ ++Package: openjdk-17-jdk ++Architecture: alpha amd64 armel armhf arm64 i386 ia64 mips mipsel mips64 mips64el powerpc ppc64 ppc64el m68k riscv64 sh4 sparc sparc64 s390x x32 ++Multi-Arch: same ++Pre-Depends: ${dpkg:Depends} ++Depends: openjdk-17-jre (= ${binary:Version}), ++ openjdk-17-jdk-headless (= ${binary:Version}), ++ ${shlibs:Depends}, ${misc:Depends} ++Recommends: libxt-dev ++Suggests: openjdk-17-demo, openjdk-17-source, visualvm ++Provides: java-sdk (= ${vm:Version}), java2-sdk, java5-sdk, java6-sdk, ++ java7-sdk, java8-sdk, java9-sdk, java10-sdk, java11-sdk, ++ java12-sdk, java13-sdk, java14-sdk, java15-sdk, java16-sdk, java17-sdk, ++ java-compiler ++Description: OpenJDK Development Kit (JDK) ++ OpenJDK is a development environment for building applications, ++ applets, and components using the Java programming language. ++ ++Package: openjdk-17-jre ++Architecture: alpha amd64 armel armhf arm64 i386 ia64 mips mipsel mips64 mips64el powerpc ppc64 ppc64el m68k riscv64 sh4 sparc sparc64 s390x x32 ++Multi-Arch: same ++Pre-Depends: ${dpkg:Depends} ++Depends: openjdk-17-jre-headless (= ${binary:Version}), ++ ${xandsound:Depends}, ${dlopenjre:Depends}, ++ ${shlibs:Depends}, ${misc:Depends} ++Recommends: ${dlopenjre:Recommends}, ${bridge:Recommends}, fonts-dejavu-extra ++Provides: java-runtime (= ${vm:Version}), java2-runtime, ++ java5-runtime, java6-runtime, ++ java7-runtime, java8-runtime, ++ java9-runtime, java10-runtime, ++ java11-runtime, java12-runtime, ++ java13-runtime, java14-runtime, java15-runtime, ++ java16-runtime, java17-runtime, ++Breaks: openjdk-17-jre-headless (<< 17~10) ++Replaces: openjdk-17-jre-headless (<< 17~10) ++Description: OpenJDK Java runtime, using ${vm:Name} ++ Full Java runtime environment - needed for executing Java GUI and Webstart ++ programs, using ${vm:Name}. 
++
++Package: openjdk-17-demo
++Architecture: alpha amd64 armel armhf arm64 i386 ia64 mips mipsel mips64 mips64el powerpc ppc64 ppc64el m68k riscv64 sh4 sparc sparc64 s390x x32
++Priority: optional
++Pre-Depends: ${dpkg:Depends}
++Depends: openjdk-17-jre (= ${binary:Version}),
++ ${shlibs:Depends}, ${misc:Depends}
++Description: Java runtime based on OpenJDK (demos and examples)
++ OpenJDK Java runtime
++
++Package: openjdk-17-source
++Architecture: all
++Multi-Arch: foreign
++Priority: optional
++Pre-Depends: ${dpkg:Depends}
++Depends: openjdk-17-jdk (>= ${source:Version}),
++ ${misc:Depends}
++Description: OpenJDK Development Kit (JDK) source files
++ OpenJDK is a development environment for building applications,
++ applets, and components using the Java programming language.
++ .
++ This package contains the Java programming language source files
++ (src.zip) for all classes that make up the Java core API.
++
++Package: openjdk-17-doc
++Section: doc
++Pre-Depends: ${dpkg:Depends}
++Architecture: all
++Multi-Arch: foreign
++Priority: optional
++Depends: ${misc:Depends},
++ libjs-jquery,
++ libjs-jquery-ui,
++ libjs-jquery-ui-theme-base,
++Suggests: openjdk-17-jdk
++Description: OpenJDK Development Kit (JDK) documentation
++ OpenJDK is a development environment for building applications,
++ applets, and components using the Java programming language.
++ .
++ This package contains the API documentation.
++
++Package: openjdk-17-dbg
++Architecture: alpha amd64 armel armhf arm64 i386 ia64 mips mipsel mips64 mips64el powerpc ppc64 ppc64el m68k riscv64 sh4 sparc sparc64 s390x x32
++Multi-Arch: same
++Priority: optional
++Section: debug
++Pre-Depends: ${dpkg:Depends}
++Depends: openjdk-17-jre-headless (= ${binary:Version}),
++ ${misc:Depends}
++Recommends: openjdk-17-jre (= ${binary:Version})
++Suggests: openjdk-17-jdk (= ${binary:Version})
++Conflicts: ${dbg:Conflicts}
++Description: Java runtime based on OpenJDK (debugging symbols)
++ OpenJDK is a development environment for building applications,
++ applets, and components using the Java programming language.
++ .
++ This package contains the debugging symbols.
++
++Package: openjdk-17-jre-zero
++Architecture: amd64 i386 arm64 armhf ppc64 ppc64el riscv64 s390x
++Multi-Arch: same
++Priority: optional
++Pre-Depends: ${dpkg:Depends}
++Depends: openjdk-17-jre-headless (= ${binary:Version}), ${shlibs:Depends}, ${misc:Depends}
++Provides: ${zerovm:Provides}
++Description: Alternative JVM for OpenJDK, using Zero
++ The package provides an alternative runtime using the Zero VM. On
++ architectures that have a Hotspot VM it is built in addition to Hotspot, as
++ a debugging aid for those architectures which don't have a Hotspot VM.
++ .
++ The VM is started with the option `-zero'. See the README.Debian for details.
diff --cc debian/control.in index 000000000,000000000..14d27bb99 new file mode 100644 --- /dev/null +++ b/debian/control.in @@@ -1,0 -1,0 +1,167 @@@ ++Source: @basename@ ++Section: java ++Priority: optional ++Maintainer: OpenJDK Team ++Uploaders: Matthias Klose ++Build-Depends: @bd_debhelper@ ++ m4, lsb-release, zip, unzip, ++ sharutils, gawk, cpio, pkg-config, procps, wdiff, @bd_fastjar@ ++ time, @bd_strip_ndt@ @bd_debugedit@ ++ @bd_check@ ++ @bd_autotools@ @bd_ant@ ++ @bd_gcc@ ++ @bd_bootstrap@ ++ @bd_openjdk@ ++ @bd_zero@ ++ @bd_syslibs@ @bd_systemtap@ ++ @bd_nss@ ++ @bd_cross@ ++Build-Depends-Indep: graphviz, pandoc, ++Rules-Requires-Root: no ++Standards-Version: 4.6.2 ++Homepage: https://openjdk.java.net/ ++Vcs-Browser: https://salsa.debian.org/openjdk-team/openjdk/tree/openjdk-17 ++Vcs-Git: https://salsa.debian.org/openjdk-team/openjdk.git -b openjdk-17 ++ ++Package: @basename@-jdk-headless ++Architecture: @any_archs@ ++Multi-Arch: same ++Pre-Depends: ${dpkg:Depends} ++Depends: @basename@-jre-headless (= ${binary:Version}), ++ ${shlibs:Depends}, ${misc:Depends} ++Suggests: @basename@-demo, @basename@-source ++Provides: java-sdk-headless (= ${vm:Version}), java2-sdk-headless, ++ java5-sdk-headless, java6-sdk-headless, ++ java7-sdk-headless, java8-sdk-headless, ++ java9-sdk-headless, java10-sdk-headless, ++ java11-sdk-headless, java12-sdk-headless, ++ java13-sdk-headless, java14-sdk-headless, java15-sdk-headless, ++ java16-sdk-headless, java17-sdk-headless, ++ java-compiler ++Breaks: openjdk-17-jre-headless (<< 17~10) ++Replaces: openjdk-17-jre-headless (<< 17~10) ++Description: OpenJDK Development Kit (JDK) (headless) ++ OpenJDK is a development environment for building applications, ++ applets, and components using the Java programming language. ++ ++Package: @basename@-jre-headless ++Architecture: @any_archs@ ++Multi-Arch: same ++Pre-Depends: ${dpkg:Depends} ++Depends: ${jredefault:Depends}, ${cacert:Depends}, ++ ${jcommon:Depends}, ${dlopenhl:Depends}, ++ ${mountpoint:Depends}, ++ ${shlibs:Depends}, ${misc:Depends} ++Recommends: ${dlopenhl:Recommends} ++Suggests: libnss-mdns, ++ @core_fonts@, ++ @cjk_fonts@ ++Provides: java-runtime-headless (= ${vm:Version}), java2-runtime-headless, ++ java5-runtime-headless, java6-runtime-headless, ++ java7-runtime-headless, java8-runtime-headless, ++ java9-runtime-headless, java10-runtime-headless, ++ java11-runtime-headless, java12-runtime-headless, ++ java13-runtime-headless, java14-runtime-headless, java15-runtime-headless, ++ java16-runtime-headless, java17-runtime-headless, ++ ${defaultvm:Provides}, ${jvm:Provides} ++Breaks: ${jrehl:Breaks} ++Conflicts: oracle-java11-installer, openjdk-11-jdk-headless (<< 11~18-2) ++Replaces: openjdk-11-jdk-headless (<< 11~18-2) ++Description: OpenJDK Java runtime, using ${vm:Name} (headless) ++ Minimal Java runtime - needed for executing non GUI Java programs, ++ using ${vm:Name}. 
++ ++Package: @basename@-jdk ++Architecture: @any_archs@ ++Multi-Arch: same ++Pre-Depends: ${dpkg:Depends} ++Depends: @basename@-jre (= ${binary:Version}), ++ @basename@-jdk-headless (= ${binary:Version}), ++ ${shlibs:Depends}, ${misc:Depends} ++Recommends: libxt-dev ++Suggests: @basename@-demo, @basename@-source, visualvm ++Provides: java-sdk (= ${vm:Version}), java2-sdk, java5-sdk, java6-sdk, ++ java7-sdk, java8-sdk, java9-sdk, java10-sdk, java11-sdk, ++ java12-sdk, java13-sdk, java14-sdk, java15-sdk, java16-sdk, java17-sdk, ++ java-compiler ++Description: OpenJDK Development Kit (JDK) ++ OpenJDK is a development environment for building applications, ++ applets, and components using the Java programming language. ++ ++Package: @basename@-jre ++Architecture: @any_archs@ ++Multi-Arch: same ++Pre-Depends: ${dpkg:Depends} ++Depends: @basename@-jre-headless (= ${binary:Version}), ++ ${xandsound:Depends}, ${dlopenjre:Depends}, ++ ${shlibs:Depends}, ${misc:Depends} ++Recommends: ${dlopenjre:Recommends}, ${bridge:Recommends}, @core_fonts@ ++Provides: java-runtime (= ${vm:Version}), java2-runtime, ++ java5-runtime, java6-runtime, ++ java7-runtime, java8-runtime, ++ java9-runtime, java10-runtime, ++ java11-runtime, java12-runtime, ++ java13-runtime, java14-runtime, java15-runtime, ++ java16-runtime, java17-runtime, ++Breaks: openjdk-17-jre-headless (<< 17~10) ++Replaces: openjdk-17-jre-headless (<< 17~10) ++Description: OpenJDK Java runtime, using ${vm:Name} ++ Full Java runtime environment - needed for executing Java GUI and Webstart ++ programs, using ${vm:Name}. ++ ++Package: @basename@-demo ++Architecture: @any_archs@ ++Priority: optional ++Pre-Depends: ${dpkg:Depends} ++Depends: @basename@-jre (= ${binary:Version}), ++ ${shlibs:Depends}, ${misc:Depends} ++Description: Java runtime based on OpenJDK (demos and examples) ++ OpenJDK Java runtime ++ ++Package: @basename@-source ++Architecture: all ++Multi-Arch: foreign ++Priority: optional ++Pre-Depends: ${dpkg:Depends} ++Depends: @basename@-jdk (>= ${source:Version}), ++ ${misc:Depends} ++Description: OpenJDK Development Kit (JDK) source files ++ OpenJDK is a development environment for building applications, ++ applets, and components using the Java programming language. ++ . ++ This package contains the Java programming language source files ++ (src.zip) for all classes that make up the Java core API. ++ ++Package: @basename@-doc ++Section: doc ++Pre-Depends: ${dpkg:Depends} ++Architecture: all ++Multi-Arch: foreign ++Priority: optional ++Depends: ${misc:Depends}, ++ libjs-jquery, ++ libjs-jquery-ui, ++ libjs-jquery-ui-theme-base, ++Suggests: @basename@-jdk ++Description: OpenJDK Development Kit (JDK) documentation ++ OpenJDK is a development environment for building applications, ++ applets, and components using the Java programming language. ++ . ++ This package contains the API documentation. ++ ++Package: @basename@-dbg ++Architecture: @any_archs@ ++Multi-Arch: same ++Priority: optional ++Section: debug ++Pre-Depends: ${dpkg:Depends} ++Depends: @basename@-jre-headless (= ${binary:Version}), ++ ${misc:Depends} ++Recommends: @basename@-jre (= ${binary:Version}) ++Suggests: @basename@-jdk (= ${binary:Version}) ++Conflicts: ${dbg:Conflicts} ++Description: Java runtime based on OpenJDK (debugging symbols) ++ OpenJDK is a development environment for building applications, ++ applets, and components using the Java programming language. ++ . ++ This package contains the debugging symbols. 
diff --cc debian/control.zero-jre
index 000000000,000000000..7a1ef31fc
new file mode 100644
--- /dev/null
+++ b/debian/control.zero-jre
@@@ -1,0 -1,0 +1,14 @@@
++
++Package: @basename@-jre-zero
++Architecture: @altzero_archs@
++Multi-Arch: same
++Priority: optional
++Pre-Depends: ${dpkg:Depends}
++Depends: @basename@-jre-headless (= ${binary:Version}), ${shlibs:Depends}, ${misc:Depends}
++Provides: ${zerovm:Provides}
++Description: Alternative JVM for OpenJDK, using Zero
++ The package provides an alternative runtime using the Zero VM. On
++ architectures that have a Hotspot VM it is built in addition to Hotspot, as
++ a debugging aid for those architectures which don't have a Hotspot VM.
++ .
++ The VM is started with the option `-zero'. See the README.Debian for details.
diff --cc debian/copyright
index 000000000,000000000..1ee6a55bd
new file mode 100644
--- /dev/null
+++ b/debian/copyright
@@@ -1,0 -1,0 +1,3431 @@@
++Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
++Files-Excluded:
++ .github/*
++ .gitattributes
++ src/java.desktop/share/native/liblcms/cms*.c
++ src/java.desktop/share/native/liblcms/lcms2*.h
++ src/java.desktop/share/native/liblcms/*.txt
++ src/java.base/share/native/libzip/zlib/*
++ src/java.desktop/share/native/libsplashscreen/giflib/*
++ src/java.desktop/share/native/libsplashscreen/libpng/*
++ src/java.smartcardio/unix/native/libj2pcsc/MUSCLE/*
++ src/java.desktop/share/native/libjavajpeg/jc*
++ src/java.desktop/share/native/libjavajpeg/jd*
++ src/java.desktop/share/native/libjavajpeg/je*
++ src/java.desktop/share/native/libjavajpeg/jf*
++ src/java.desktop/share/native/libjavajpeg/ji*.c
++ src/java.desktop/share/native/libjavajpeg/jm*
++ src/java.desktop/share/native/libjavajpeg/jpegi*
++ src/java.desktop/share/native/libjavajpeg/jpeglib.h
++ src/java.desktop/share/native/libjavajpeg/jq*
++ src/java.desktop/share/native/libjavajpeg/jv*
++ src/java.desktop/share/native/libjavajpeg/ju*
++ src/java.desktop/share/native/libjavajpeg/README
++Source: https://github.com/openjdk/jdk17u
++Comment:
++ ------------------------------------------------------------------------------
++ This package was debianized by Matthias Klose on
++ Wed, 08 Aug 2007 15:55:39 +0200.
++ ------------------------------------------------------------------------------
++ Upstream Authors:
++ OpenJDK:
++ Sun Microsystems, Inc.
++ Oracle and/or its affiliates.
++ Packaging:
++ Matthias Klose
++ ------------------------------------------------------------------------------
++
++Files: *
++Copyright:
++ OpenJDK:
++ Copyright © 1996-2007 Sun Microsystems, Inc.
++ Copyright © 1996-2023 Oracle and/or its affiliates.
++ For third party copyrights see below (copies from the third party readme).
++ Portions Copyright © 1993-2014 IBM Corp.
++ Portions Copyright © 1997 Eastman Kodak Company.
++ Portions Copyright © 1999-2005 The Apache Software Foundation.
++ Java Access Bridge:
++ Portions Copyright © 2002-2007 Bill Haneman
++ Portions Copyright © 2002-2007 Louise Miller
++ Portions Copyright © 2002-2007 Gergő Érdi
++ Portions Copyright © 2002-2007 Laszlo (Laca) Peter
++ Portions Copyright © 2002-2007 Jeff Cai
++ Portions Copyright © 2002-2007 George Kraft IV
++ Portions Copyright © 2002-2007 Padraig O'Briain
++ Portions Copyright © 2002-2007 Darren Kenny
++ ------------------------------------------------------------------------------
++License: GPL with the Classpath exception
++ NOTE: the combined work which includes the upstream components below
++ carries forward the OpenJDK Assembly Exception (text included below).
++ .
++ OpenJDK:
++ OpenJDK is licensed under the GPL v2 with exceptions,
++ see `/usr/share/common-licenses/GPL-2'.
++ The exceptions are:
++ - "CLASSPATH" EXCEPTION TO THE GPL
++ - OPENJDK ASSEMBLY EXCEPTION
++ Various third party code in OpenJDK is licensed under different licenses.
++ .
++ Java Access Bridge:
++ Java Access Bridge is licensed under the LGPL v2.
++ See `/usr/share/common-licenses/LGPL-2'.
++ .
++ A Note About License Headers
++ ----------------------------
++ .
++ Some sources downloaded from openjdk.java.net do not display the GPL
++ license header. Instances are:
++ .
++ - The files in openjdk/j2se/src/share/classes/javax/xml/stream/ seem to
++ comprise the BEA-StAX source code
++ .
++ http://ftpna2.bea.com/pub/downloads/jsr173.jar
++ .
++ with some Sun-specific modifications. We're assuming that Sun is
++ bundling BEA-StAX under the terms of the Apache License 2.0 and
++ that the modifications are owned by Sun.
++ .
++ - We are assuming that these files are owned by Sun:
++ openjdk/j2se/src/share/classes/**/resources/*.properties
++ .
++ The downloaded sources include a script that inserts proprietary
++ license headers into the source files it generates. The script
++ itself is GPL'd, so we patched it to emit the GPL header. The
++ file is:
++ openjdk/j2se/make/java/nio/genExceptions.sh
++ .
++ ------------------------------------------------------------------------------
++ "CLASSPATH" EXCEPTION TO THE GPL
++ .
++ Certain source files distributed by Sun Microsystems, Inc. are subject to
++ the following clarification and special exception to the GPL, but only where
++ Sun has expressly included in the particular source file's header the words
++ "Sun designates this particular file as subject to the "Classpath" exception
++ as provided by Sun in the LICENSE file that accompanied this code."
++ .
++ Linking this library statically or dynamically with other modules is making
++ a combined work based on this library. Thus, the terms and conditions of
++ the GNU General Public License cover the whole combination.
++ .
++ As a special exception, the copyright holders of this library give you
++ permission to link this library with independent modules to produce an
++ executable, regardless of the license terms of these independent modules,
++ and to copy and distribute the resulting executable under terms of your
++ choice, provided that you also meet, for each linked independent module,
++ the terms and conditions of the license of that module. An independent
++ module is a module which is not derived from or based on this library. If
++ you modify this library, you may extend this exception to your version of
++ the library, but you are not obligated to do so. If you do not wish to do
++ so, delete this exception statement from your version.
++ .
++ ------------------------------------------------------------------------------ ++ OPENJDK ASSEMBLY EXCEPTION ++ . ++ The OpenJDK source code made available by Sun at openjdk.java.net and ++ openjdk.dev.java.net ("OpenJDK Code") is distributed under the terms of the ++ GNU General Public License version 2 ++ only ("GPL2"), with the following clarification and special exception. ++ . ++ Linking this OpenJDK Code statically or dynamically with other code ++ is making a combined work based on this library. Thus, the terms ++ and conditions of GPL2 cover the whole combination. ++ . ++ As a special exception, Sun gives you permission to link this ++ OpenJDK Code with certain code licensed by Sun as indicated at ++ http://openjdk.java.net/legal/exception-modules-2007-05-08.html ++ ("Designated Exception Modules") to produce an executable, ++ regardless of the license terms of the Designated Exception Modules, ++ and to copy and distribute the resulting executable under GPL2, ++ provided that the Designated Exception Modules continue to be ++ governed by the licenses under which they were offered by Sun. ++ . ++ As such, it allows licensees and sublicensees of Sun's GPL2 OpenJDK Code to ++ build an executable that includes those portions of necessary code that Sun ++ could not provide under GPL2 (or that Sun has provided under GPL2 with the ++ Classpath exception). If you modify or add to the OpenJDK code, that new ++ GPL2 code may still be combined with Designated Exception Modules if the ++ new code is made subject to this exception by its copyright holder. ++ . ++ ------------------------------------------------------------------------------ ++ OpenJDK Trademark Notice ++ Version 1.1, 2008/3/10 ++ . ++ OpenJDK (the "Name") is a trademark of Sun Microsystems, Inc. (the "Owner"). ++ Owner permits any person obtaining a copy of this software (the "Software") ++ which is based on original software retrieved from one of the following ++ websites: http://download.java.net/openjdk, http://hg.openjdk.java.net/jdk6, ++ or http://openjdk.java.net (each a "Website", with the original software made ++ available by the Owner on a Website being known as the "Website Software") to ++ use the Name in package names and version strings of the Software subject to ++ the following conditions: ++ . ++ - The Software is a substantially complete implementation of the OpenJDK ++ development kit or runtime environment code made available by Owner on a ++ Website, and the vast majority of the Software code is identical to the ++ upstream Website Software; ++ . ++ - No permission is hereby granted to use the Name in any other manner, ++ unless such use constitutes "fair use." ++ . ++ - The Owner makes no warranties of any kind respecting the Name and all ++ representations and warranties, including any implied warranty of ++ merchantability, fitness for a particular purpose or non-infringement ++ are hereby disclaimed; and ++ . ++ - This notice and the following legend are included in all copies of the ++ Software or portions of it: ++ . ++ OpenJDK is a trademark or registered trademark of Sun Microsystems, ++ Inc. in the United States and other countries. ++ . ++ The Name may also be used in connection with descriptions of the Software that ++ constitute "fair use," such as "derived from the OpenJDK code base" or "based ++ on the OpenJDK source code." ++ . ++ Owner intends to revise this Notice as necessary in order to meet the needs of ++ the OpenJDK Community. 
Please send questions or comments about this Notice to ++ Sun Microsystems at openjdk-tm@sun.com. Revisions to this Notice will be ++ announced on the public mailing list announce@openjdk.java.net, to which you ++ may subscribe by visiting http://mail.openjdk.java.net. The latest version of ++ this Notice may be found at http://openjdk.java.net/legal. ++ . ++ ------------------------------------------------------------------------------ ++ . ++ The contents of the jdk/src/share/native/sun/security/ec/impl/ directory are ++ licensed under the LGPL-2.1. See `/usr/share/common-licenses/LGPL-2-1'. ++ . ++ ------------------------------------------------------------------------------ ++ The following licenses for third party code are taken from ++ openjdk/THIRD_PARTY_README ++ ------------------------------------------------------------------------------ ++ . ++ DO NOT TRANSLATE OR LOCALIZE. ++ ----------------------------- ++ . ++ %% This notice is provided with respect to ASM Bytecode Manipulation ++ Framework v5.0, which may be included with JRE 8, and JDK 8, and ++ OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright (c) 2000-2011 France Télécom ++ All rights reserved. ++ . ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ . ++ 1. Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ . ++ 2. Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in the ++ documentation and/or other materials provided with the distribution. ++ . ++ 3. Neither the name of the copyright holders nor the names of its ++ contributors may be used to endorse or promote products derived from ++ this software without specific prior written permission. ++ . ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE ++ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF ++ THE POSSIBILITY OF SUCH DAMAGE. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to BSDiff v4.3, which may be ++ included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright 2003-2005 Colin Percival ++ All rights reserved ++ . ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted providing that the following conditions ++ are met: ++ 1. Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ 2. 
Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in the ++ documentation and/or other materials provided with the distribution. ++ . ++ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ++ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ++ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS ++ OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) ++ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, ++ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING ++ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ POSSIBILITY OF SUCH DAMAGE. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to CodeViewer 1.0, which may be ++ included with JDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright 1999 by CoolServlets.com. ++ . ++ Any errors or suggested improvements to this class can be reported as ++ instructed on CoolServlets.com. We hope you enjoy this program... your ++ comments will encourage further development! This software is distributed ++ under the terms of the BSD License. Redistribution and use in source and ++ binary forms, with or without modification, are permitted provided that the ++ following conditions are met: ++ . ++ 1. Redistributions of source code must retain the above copyright notice, this ++ list of conditions and the following disclaimer. ++ . ++ 2. Redistributions in binary form must reproduce the above copyright notice, ++ this list of conditions and the following disclaimer in the documentation ++ and/or other materials provided with the distribution. ++ . ++ Neither name of CoolServlets.com nor the names of its contributors may be ++ used to endorse or promote products derived from this software without ++ specific prior written permission. ++ . ++ THIS SOFTWARE IS PROVIDED BY COOLSERVLETS.COM AND CONTRIBUTORS ``AS IS'' AND ++ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY ++ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ++ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." ++ . ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------- ++ . ++ %% This notice is provided with respect to Cryptix AES 3.2.0, which may be ++ included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ Cryptix General License ++ . ++ Copyright (c) 1995-2005 The Cryptix Foundation Limited. ++ All rights reserved. ++ . 
++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions are ++ met: ++ . ++ 1. Redistributions of source code must retain the copyright notice, ++ this list of conditions and the following disclaimer. ++ . ++ 2. Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ . ++ THIS SOFTWARE IS PROVIDED BY THE CRYPTIX FOUNDATION LIMITED AND ++ CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, ++ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ++ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ++ IN NO EVENT SHALL THE CRYPTIX FOUNDATION LIMITED OR CONTRIBUTORS BE ++ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR ++ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ++ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE ++ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN ++ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------- ++ . ++ %% This notice is provided with respect to CUP Parser Generator for ++ Java 0.10k, which may be included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright 1996-1999 by Scott Hudson, Frank Flannery, C. Scott Ananian ++ . ++ Permission to use, copy, modify, and distribute this software and its ++ documentation for any purpose and without fee is hereby granted, provided ++ that the above copyright notice appear in all copies and that both the ++ copyright notice and this permission notice and warranty disclaimer appear in ++ supporting documentation, and that the names of the authors or their ++ employers not be used in advertising or publicity pertaining to distribution of ++ the software without specific, written prior permission. ++ . ++ The authors and their employers disclaim all warranties with regard to ++ this software, including all implied warranties of merchantability and fitness. ++ In no event shall the authors or their employers be liable for any special, ++ indirect or consequential damages or any damages whatsoever resulting from ++ loss of use, data or profits, whether in an action of contract, negligence or ++ other tortious action, arising out of or in connection with the use or ++ performance of this software. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to Document Object Model (DOM) Level 2 ++ & 3, which may be included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ W3C SOFTWARE NOTICE AND LICENSE ++ . ++ http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231 ++ . ++ This work (and included software, documentation such as READMEs, or other ++ related items) is being provided by the copyright holders under the following ++ license. By obtaining, using and/or copying this work, you (the licensee) ++ agree that you have read, understood, and will comply with the following terms ++ and conditions. ++ . 
++ Permission to copy, modify, and distribute this software and its
++ documentation, with or without modification, for any purpose and without fee
++ or royalty is hereby granted, provided that you include the following on ALL
++ copies of the software and documentation or portions thereof, including
++ modifications:
++ .
++ 1. The full text of this NOTICE in a location viewable to users of the
++ redistributed or derivative work.
++ .
++ 2. Any pre-existing intellectual property disclaimers, notices, or terms and
++ conditions. If none exist, the W3C Software Short Notice should be included
++ (hypertext is preferred, text is permitted) within the body of any
++ redistributed or derivative code.
++ .
++ 3. Notice of any changes or modifications to the files, including the date
++ changes were made. (We recommend you provide URIs to the location from
++ which the code is derived.)
++ .
++ THIS SOFTWARE AND DOCUMENTATION IS PROVIDED "AS IS," AND COPYRIGHT HOLDERS
++ MAKE NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT
++ LIMITED TO, WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR
++ PURPOSE OR THAT THE USE OF THE SOFTWARE OR DOCUMENTATION WILL NOT INFRINGE ANY
++ THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS.
++ .
++ COPYRIGHT HOLDERS WILL NOT BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL
++ OR CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE OF THE SOFTWARE OR
++ DOCUMENTATION. The name and trademarks of copyright holders may NOT be used
++ in advertising or publicity pertaining to the software without specific,
++ written prior permission. Title to copyright in this software and any
++ associated documentation will at all times remain with copyright holders.
++ .
++ ____________________________________
++ .
++ This formulation of W3C's notice and license became active on December 31
++ 2002. This version removes the copyright ownership notice such that this
++ license can be used with materials other than those owned by the W3C, reflects
++ that ERCIM is now a host of the W3C, includes references to this specific
++ dated version of the license, and removes the ambiguous grant of "use".
++ Otherwise, this version is the same as the previous version and is written so
++ as to preserve the Free Software Foundation's assessment of GPL compatibility
++ and OSI's certification under the Open Source Definition. Please see our
++ Copyright FAQ for common questions about using materials from our site,
++ including specific terms and conditions for packages like libwww, Amaya, and
++ Jigsaw. Other questions about this notice can be directed to
++ site-policy@w3.org.
++ .
++ --- end of LICENSE ---
++ .
++ -------------------------------------------------------------------------------
++ .
++ %% This notice is provided with respect to Dynalink v0.5, which may be
++ included with JRE 8, JDK 8, and OpenJDK 8.
++ .
++ --- begin of LICENSE ---
++ .
++ Copyright (c) 2009-2013, Attila Szegedi
++ .
++ All rights reserved. Redistribution and use in source and binary forms, with or
++ without modification, are permitted provided that the following conditions are
++ met: * Redistributions of source code must retain the above copyright notice,
++ this list of conditions and the following disclaimer. * Redistributions in
++ binary form must reproduce the above copyright notice, this list of
++ conditions and the following disclaimer in the documentation and/or other
++ materials provided with the distribution. * Neither the name of Attila
++ Szegedi nor the names of its contributors may be used to endorse or promote
++ products derived from this software without specific prior written permission.
++ .
++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
++ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
++ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
++ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ .
++ --- end of LICENSE ---
++ .
++ -------------------------------------------------------------------------------
++ .
++ %% This notice is provided with respect to Elliptic Curve Cryptography, which
++ may be included with JRE 8, JDK 8, and OpenJDK 8.
++ .
++ You are receiving a copy of the Elliptic Curve Cryptography library in source
++ form with the JDK 8 and OpenJDK 8 source distributions, and as object code in
++ the JRE 8 & JDK 8 runtimes.
++ .
++ In the case of the JRE 8 & JDK 8 runtimes, the terms of the Oracle license do
++ NOT apply to the Elliptic Curve Cryptography library; it is licensed under the
++ following license, separately from Oracle's JDK & JRE. If you do not wish to
++ install the Elliptic Curve Cryptography library, you may delete the library
++ named libsunec.so (on Solaris and Linux systems) or sunec.dll (on Windows
++ systems) from the JRE bin directory reserved for native libraries.
++ .
++ .
++ --- begin of LICENSE ---
++ .
++ GNU LESSER GENERAL PUBLIC LICENSE
++ Version 2.1, February 1999
++ .
++ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
++ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ Everyone is permitted to copy and distribute verbatim copies
++ of this license document, but changing it is not allowed.
++ .
++ [This is the first released version of the Lesser GPL. It also counts
++ as the successor of the GNU Library Public License, version 2, hence
++ the version number 2.1.]
++ .
++ Preamble
++ .
++ The licenses for most software are designed to take away your
++ freedom to share and change it. By contrast, the GNU General Public
++ Licenses are intended to guarantee your freedom to share and change
++ free software--to make sure the software is free for all its users.
++ .
++ This license, the Lesser General Public License, applies to some
++ specially designated software packages--typically libraries--of the
++ Free Software Foundation and other authors who decide to use it. You
++ can use it too, but we suggest you first think carefully about whether
++ this license or the ordinary General Public License is the better
++ strategy to use in any particular case, based on the explanations below.
++ .
++ When we speak of free software, we are referring to freedom of use,
++ not price.
Our General Public Licenses are designed to make sure that ++ you have the freedom to distribute copies of free software (and charge ++ for this service if you wish); that you receive source code or can get ++ it if you want it; that you can change the software and use pieces of ++ it in new free programs; and that you are informed that you can do ++ these things. ++ . ++ To protect your rights, we need to make restrictions that forbid ++ distributors to deny you these rights or to ask you to surrender these ++ rights. These restrictions translate to certain responsibilities for ++ you if you distribute copies of the library or if you modify it. ++ . ++ For example, if you distribute copies of the library, whether gratis ++ or for a fee, you must give the recipients all the rights that we gave ++ you. You must make sure that they, too, receive or can get the source ++ code. If you link other code with the library, you must provide ++ complete object files to the recipients, so that they can relink them ++ with the library after making changes to the library and recompiling ++ it. And you must show them these terms so they know their rights. ++ . ++ We protect your rights with a two-step method: (1) we copyright the ++ library, and (2) we offer you this license, which gives you legal ++ permission to copy, distribute and/or modify the library. ++ . ++ To protect each distributor, we want to make it very clear that ++ there is no warranty for the free library. Also, if the library is ++ modified by someone else and passed on, the recipients should know ++ that what they have is not the original version, so that the original ++ author's reputation will not be affected by problems that might be ++ introduced by others. ++ . ++ Finally, software patents pose a constant threat to the existence of ++ any free program. We wish to make sure that a company cannot ++ effectively restrict the users of a free program by obtaining a ++ restrictive license from a patent holder. Therefore, we insist that ++ any patent license obtained for a version of the library must be ++ consistent with the full freedom of use specified in this license. ++ . ++ Most GNU software, including some libraries, is covered by the ++ ordinary GNU General Public License. This license, the GNU Lesser ++ General Public License, applies to certain designated libraries, and ++ is quite different from the ordinary General Public License. We use ++ this license for certain libraries in order to permit linking those ++ libraries into non-free programs. ++ . ++ When a program is linked with a library, whether statically or using ++ a shared library, the combination of the two is legally speaking a ++ combined work, a derivative of the original library. The ordinary ++ General Public License therefore permits such linking only if the ++ entire combination fits its criteria of freedom. The Lesser General ++ Public License permits more lax criteria for linking other code with ++ the library. ++ . ++ We call this license the "Lesser" General Public License because it ++ does Less to protect the user's freedom than the ordinary General ++ Public License. It also provides other free software developers Less ++ of an advantage over competing non-free programs. These disadvantages ++ are the reason we use the ordinary General Public License for many ++ libraries. However, the Lesser license provides advantages in certain ++ special circumstances. ++ . 
++ For example, on rare occasions, there may be a special need to ++ encourage the widest possible use of a certain library, so that it becomes ++ a de-facto standard. To achieve this, non-free programs must be ++ allowed to use the library. A more frequent case is that a free ++ library does the same job as widely used non-free libraries. In this ++ case, there is little to gain by limiting the free library to free ++ software only, so we use the Lesser General Public License. ++ . ++ In other cases, permission to use a particular library in non-free ++ programs enables a greater number of people to use a large body of ++ free software. For example, permission to use the GNU C Library in ++ non-free programs enables many more people to use the whole GNU ++ operating system, as well as its variant, the GNU/Linux operating ++ system. ++ . ++ Although the Lesser General Public License is Less protective of the ++ users' freedom, it does ensure that the user of a program that is ++ linked with the Library has the freedom and the wherewithal to run ++ that program using a modified version of the Library. ++ . ++ The precise terms and conditions for copying, distribution and ++ modification follow. Pay close attention to the difference between a ++ "work based on the library" and a "work that uses the library". The ++ former contains code derived from the library, whereas the latter must ++ be combined with the library in order to run. ++ . ++ GNU LESSER GENERAL PUBLIC LICENSE ++ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION ++ . ++ 0. This License Agreement applies to any software library or other ++ program which contains a notice placed by the copyright holder or ++ other authorized party saying it may be distributed under the terms of ++ this Lesser General Public License (also called "this License"). ++ Each licensee is addressed as "you". ++ . ++ A "library" means a collection of software functions and/or data ++ prepared so as to be conveniently linked with application programs ++ (which use some of those functions and data) to form executables. ++ . ++ The "Library", below, refers to any such software library or work ++ which has been distributed under these terms. A "work based on the ++ Library" means either the Library or any derivative work under ++ copyright law: that is to say, a work containing the Library or a ++ portion of it, either verbatim or with modifications and/or translated ++ straightforwardly into another language. (Hereinafter, translation is ++ included without limitation in the term "modification".) ++ . ++ "Source code" for a work means the preferred form of the work for ++ making modifications to it. For a library, complete source code means ++ all the source code for all modules it contains, plus any associated ++ interface definition files, plus the scripts used to control compilation ++ and installation of the library. ++ . ++ Activities other than copying, distribution and modification are not ++ covered by this License; they are outside its scope. The act of ++ running a program using the Library is not restricted, and output from ++ such a program is covered only if its contents constitute a work based ++ on the Library (independent of the use of the Library in a tool for ++ writing it). Whether that is true depends on what the Library does ++ and what the program that uses the Library does. ++ . ++ 1. 
You may copy and distribute verbatim copies of the Library's ++ complete source code as you receive it, in any medium, provided that ++ you conspicuously and appropriately publish on each copy an ++ appropriate copyright notice and disclaimer of warranty; keep intact ++ all the notices that refer to this License and to the absence of any ++ warranty; and distribute a copy of this License along with the ++ Library. ++ . ++ You may charge a fee for the physical act of transferring a copy, ++ and you may at your option offer warranty protection in exchange for a ++ fee. ++ . ++ 2. You may modify your copy or copies of the Library or any portion ++ of it, thus forming a work based on the Library, and copy and ++ distribute such modifications or work under the terms of Section 1 ++ above, provided that you also meet all of these conditions: ++ . ++ a) The modified work must itself be a software library. ++ . ++ b) You must cause the files modified to carry prominent notices ++ stating that you changed the files and the date of any change. ++ . ++ c) You must cause the whole of the work to be licensed at no ++ charge to all third parties under the terms of this License. ++ . ++ d) If a facility in the modified Library refers to a function or a ++ table of data to be supplied by an application program that uses ++ the facility, other than as an argument passed when the facility ++ is invoked, then you must make a good faith effort to ensure that, ++ in the event an application does not supply such function or ++ table, the facility still operates, and performs whatever part of ++ its purpose remains meaningful. ++ . ++ (For example, a function in a library to compute square roots has ++ a purpose that is entirely well-defined independent of the ++ application. Therefore, Subsection 2d requires that any ++ application-supplied function or table used by this function must ++ be optional: if the application does not supply it, the square ++ root function must still compute square roots.) ++ . ++ These requirements apply to the modified work as a whole. If ++ identifiable sections of that work are not derived from the Library, ++ and can be reasonably considered independent and separate works in ++ themselves, then this License, and its terms, do not apply to those ++ sections when you distribute them as separate works. But when you ++ distribute the same sections as part of a whole which is a work based ++ on the Library, the distribution of the whole must be on the terms of ++ this License, whose permissions for other licensees extend to the ++ entire whole, and thus to each and every part regardless of who wrote ++ it. ++ . ++ Thus, it is not the intent of this section to claim rights or contest ++ your rights to work written entirely by you; rather, the intent is to ++ exercise the right to control the distribution of derivative or ++ collective works based on the Library. ++ . ++ In addition, mere aggregation of another work not based on the Library ++ with the Library (or with a work based on the Library) on a volume of ++ a storage or distribution medium does not bring the other work under ++ the scope of this License. ++ . ++ 3. You may opt to apply the terms of the ordinary GNU General Public ++ License instead of this License to a given copy of the Library. To do ++ this, you must alter all the notices that refer to this License, so ++ that they refer to the ordinary GNU General Public License, version 2, ++ instead of to this License. 
(If a newer version than version 2 of the ++ ordinary GNU General Public License has appeared, then you can specify ++ that version instead if you wish.) Do not make any other change in ++ these notices. ++ . ++ Once this change is made in a given copy, it is irreversible for ++ that copy, so the ordinary GNU General Public License applies to all ++ subsequent copies and derivative works made from that copy. ++ . ++ This option is useful when you wish to copy part of the code of ++ the Library into a program that is not a library. ++ . ++ 4. You may copy and distribute the Library (or a portion or ++ derivative of it, under Section 2) in object code or executable form ++ under the terms of Sections 1 and 2 above provided that you accompany ++ it with the complete corresponding machine-readable source code, which ++ must be distributed under the terms of Sections 1 and 2 above on a ++ medium customarily used for software interchange. ++ . ++ If distribution of object code is made by offering access to copy ++ from a designated place, then offering equivalent access to copy the ++ source code from the same place satisfies the requirement to ++ distribute the source code, even though third parties are not ++ compelled to copy the source along with the object code. ++ . ++ 5. A program that contains no derivative of any portion of the ++ Library, but is designed to work with the Library by being compiled or ++ linked with it, is called a "work that uses the Library". Such a ++ work, in isolation, is not a derivative work of the Library, and ++ therefore falls outside the scope of this License. ++ . ++ However, linking a "work that uses the Library" with the Library ++ creates an executable that is a derivative of the Library (because it ++ contains portions of the Library), rather than a "work that uses the ++ library". The executable is therefore covered by this License. ++ Section 6 states terms for distribution of such executables. ++ . ++ When a "work that uses the Library" uses material from a header file ++ that is part of the Library, the object code for the work may be a ++ derivative work of the Library even though the source code is not. ++ Whether this is true is especially significant if the work can be ++ linked without the Library, or if the work is itself a library. The ++ threshold for this to be true is not precisely defined by law. ++ . ++ If such an object file uses only numerical parameters, data ++ structure layouts and accessors, and small macros and small inline ++ functions (ten lines or less in length), then the use of the object ++ file is unrestricted, regardless of whether it is legally a derivative ++ work. (Executables containing this object code plus portions of the ++ Library will still fall under Section 6.) ++ . ++ Otherwise, if the work is a derivative of the Library, you may ++ distribute the object code for the work under the terms of Section 6. ++ Any executables containing that work also fall under Section 6, ++ whether or not they are linked directly with the Library itself. ++ . ++ 6. As an exception to the Sections above, you may also combine or ++ link a "work that uses the Library" with the Library to produce a ++ work containing portions of the Library, and distribute that work ++ under terms of your choice, provided that the terms permit ++ modification of the work for the customer's own use and reverse ++ engineering for debugging such modifications. ++ . 
++ You must give prominent notice with each copy of the work that the ++ Library is used in it and that the Library and its use are covered by ++ this License. You must supply a copy of this License. If the work ++ during execution displays copyright notices, you must include the ++ copyright notice for the Library among them, as well as a reference ++ directing the user to the copy of this License. Also, you must do one ++ of these things: ++ . ++ a) Accompany the work with the complete corresponding ++ machine-readable source code for the Library including whatever ++ changes were used in the work (which must be distributed under ++ Sections 1 and 2 above); and, if the work is an executable linked ++ with the Library, with the complete machine-readable "work that ++ uses the Library", as object code and/or source code, so that the ++ user can modify the Library and then relink to produce a modified ++ executable containing the modified Library. (It is understood ++ that the user who changes the contents of definitions files in the ++ Library will not necessarily be able to recompile the application ++ to use the modified definitions.) ++ . ++ b) Use a suitable shared library mechanism for linking with the ++ Library. A suitable mechanism is one that (1) uses at run time a ++ copy of the library already present on the user's computer system, ++ rather than copying library functions into the executable, and (2) ++ will operate properly with a modified version of the library, if ++ the user installs one, as long as the modified version is ++ interface-compatible with the version that the work was made with. ++ . ++ c) Accompany the work with a written offer, valid for at ++ least three years, to give the same user the materials ++ specified in Subsection 6a, above, for a charge no more ++ than the cost of performing this distribution. ++ . ++ d) If distribution of the work is made by offering access to copy ++ from a designated place, offer equivalent access to copy the above ++ specified materials from the same place. ++ . ++ e) Verify that the user has already received a copy of these ++ materials or that you have already sent this user a copy. ++ . ++ For an executable, the required form of the "work that uses the ++ Library" must include any data and utility programs needed for ++ reproducing the executable from it. However, as a special exception, ++ the materials to be distributed need not include anything that is ++ normally distributed (in either source or binary form) with the major ++ components (compiler, kernel, and so on) of the operating system on ++ which the executable runs, unless that component itself accompanies ++ the executable. ++ . ++ It may happen that this requirement contradicts the license ++ restrictions of other proprietary libraries that do not normally ++ accompany the operating system. Such a contradiction means you cannot ++ use both them and the Library together in an executable that you ++ distribute. ++ . ++ 7. You may place library facilities that are a work based on the ++ Library side-by-side in a single library together with other library ++ facilities not covered by this License, and distribute such a combined ++ library, provided that the separate distribution of the work based on ++ the Library and of the other library facilities is otherwise ++ permitted, and provided that you do these two things: ++ . ++ a) Accompany the combined library with a copy of the same work ++ based on the Library, uncombined with any other library ++ facilities. 
This must be distributed under the terms of the ++ Sections above. ++ . ++ b) Give prominent notice with the combined library of the fact ++ that part of it is a work based on the Library, and explaining ++ where to find the accompanying uncombined form of the same work. ++ . ++ 8. You may not copy, modify, sublicense, link with, or distribute ++ the Library except as expressly provided under this License. Any ++ attempt otherwise to copy, modify, sublicense, link with, or ++ distribute the Library is void, and will automatically terminate your ++ rights under this License. However, parties who have received copies, ++ or rights, from you under this License will not have their licenses ++ terminated so long as such parties remain in full compliance. ++ . ++ 9. You are not required to accept this License, since you have not ++ signed it. However, nothing else grants you permission to modify or ++ distribute the Library or its derivative works. These actions are ++ prohibited by law if you do not accept this License. Therefore, by ++ modifying or distributing the Library (or any work based on the ++ Library), you indicate your acceptance of this License to do so, and ++ all its terms and conditions for copying, distributing or modifying ++ the Library or works based on it. ++ . ++ 10. Each time you redistribute the Library (or any work based on the ++ Library), the recipient automatically receives a license from the ++ original licensor to copy, distribute, link with or modify the Library ++ subject to these terms and conditions. You may not impose any further ++ restrictions on the recipients' exercise of the rights granted herein. ++ You are not responsible for enforcing compliance by third parties with ++ this License. ++ . ++ 11. If, as a consequence of a court judgment or allegation of patent ++ infringement or for any other reason (not limited to patent issues), ++ conditions are imposed on you (whether by court order, agreement or ++ otherwise) that contradict the conditions of this License, they do not ++ excuse you from the conditions of this License. If you cannot ++ distribute so as to satisfy simultaneously your obligations under this ++ License and any other pertinent obligations, then as a consequence you ++ may not distribute the Library at all. For example, if a patent ++ license would not permit royalty-free redistribution of the Library by ++ all those who receive copies directly or indirectly through you, then ++ the only way you could satisfy both it and this License would be to ++ refrain entirely from distribution of the Library. ++ . ++ If any portion of this section is held invalid or unenforceable under any ++ particular circumstance, the balance of the section is intended to apply, ++ and the section as a whole is intended to apply in other circumstances. ++ . ++ It is not the purpose of this section to induce you to infringe any ++ patents or other property right claims or to contest validity of any ++ such claims; this section has the sole purpose of protecting the ++ integrity of the free software distribution system which is ++ implemented by public license practices. Many people have made ++ generous contributions to the wide range of software distributed ++ through that system in reliance on consistent application of that ++ system; it is up to the author/donor to decide if he or she is willing ++ to distribute software through any other system and a licensee cannot ++ impose that choice. ++ . 
++ This section is intended to make thoroughly clear what is believed to ++ be a consequence of the rest of this License. ++ . ++ 12. If the distribution and/or use of the Library is restricted in ++ certain countries either by patents or by copyrighted interfaces, the ++ original copyright holder who places the Library under this License may add ++ an explicit geographical distribution limitation excluding those countries, ++ so that distribution is permitted only in or among countries not thus ++ excluded. In such case, this License incorporates the limitation as if ++ written in the body of this License. ++ . ++ 13. The Free Software Foundation may publish revised and/or new ++ versions of the Lesser General Public License from time to time. ++ Such new versions will be similar in spirit to the present version, ++ but may differ in detail to address new problems or concerns. ++ . ++ Each version is given a distinguishing version number. If the Library ++ specifies a version number of this License which applies to it and ++ "any later version", you have the option of following the terms and ++ conditions either of that version or of any later version published by ++ the Free Software Foundation. If the Library does not specify a ++ license version number, you may choose any version ever published by ++ the Free Software Foundation. ++ . ++ 14. If you wish to incorporate parts of the Library into other free ++ programs whose distribution conditions are incompatible with these, ++ write to the author to ask for permission. For software which is ++ copyrighted by the Free Software Foundation, write to the Free ++ Software Foundation; we sometimes make exceptions for this. Our ++ decision will be guided by the two goals of preserving the free status ++ of all derivatives of our free software and of promoting the sharing ++ and reuse of software generally. ++ . ++ NO WARRANTY ++ . ++ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO ++ WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. ++ EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR ++ OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY ++ KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE ++ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ++ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE ++ LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME ++ THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. ++ . ++ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN ++ WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY ++ AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU ++ FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR ++ CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE ++ LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING ++ RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A ++ FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF ++ SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH ++ DAMAGES. ++ . ++ END OF TERMS AND CONDITIONS ++ . ++ How to Apply These Terms to Your New Libraries ++ . ++ If you develop a new library, and you want it to be of the greatest ++ possible use to the public, we recommend making it free software that ++ everyone can redistribute and change. 
You can do so by permitting ++ redistribution under these terms (or, alternatively, under the terms of the ++ ordinary General Public License). ++ . ++ To apply these terms, attach the following notices to the library. It is ++ safest to attach them to the start of each source file to most effectively ++ convey the exclusion of warranty; and each file should have at least the ++ "copyright" line and a pointer to where the full notice is found. ++ . ++ <one line to give the library's name and a brief idea of what it does.> ++ Copyright (C) <year> <name of author> ++ . ++ This library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ . ++ This library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ . ++ You should have received a copy of the GNU Lesser General Public ++ License along with this library; if not, write to the Free Software ++ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ . ++ Also add information on how to contact you by electronic and paper mail. ++ . ++ You should also get your employer (if you work as a programmer) or your ++ school, if any, to sign a "copyright disclaimer" for the library, if ++ necessary. Here is a sample; alter the names: ++ . ++ Yoyodyne, Inc., hereby disclaims all copyright interest in the ++ library `Frob' (a library for tweaking knobs) written by James Random Hacker. ++ . ++ <signature of Ty Coon>, 1 April 1990 ++ Ty Coon, President of Vice ++ . ++ That's all there is to it! ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to ECMAScript Language ++ Specification ECMA-262 Edition 5.1 which may be included with ++ JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright notice ++ Copyright © 2011 Ecma International ++ Ecma International ++ Rue du Rhone 114 ++ CH-1204 Geneva ++ Tel: +41 22 849 6000 ++ Fax: +41 22 849 6001 ++ Web: http://www.ecma-international.org ++ . ++ This document and possible translations of it may be copied and furnished to ++ others, and derivative works that comment on or otherwise explain it or assist ++ in its implementation may be prepared, copied, published, and distributed, in ++ whole or in part, without restriction of any kind, provided that the above ++ copyright notice and this section are included on all such copies and derivative ++ works. However, this document itself may not be modified in any way, including ++ by removing the copyright notice or references to Ecma International, except as ++ needed for the purpose of developing any document or deliverable produced by ++ Ecma International (in which case the rules applied to copyrights must be ++ followed) or as required to translate it into languages other than English. The ++ limited permissions granted above are perpetual and will not be revoked by Ecma ++ International or its successors or assigns.
This document and the information ++ contained herein is provided on an "AS IS" basis and ECMA INTERNATIONAL ++ DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY ++ WARRANTY THAT THE USE OF THE INFORMATION HEREIN WILL NOT INFRINGE ANY OWNERSHIP ++ RIGHTS OR ANY IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR ++ PURPOSE." ++ . ++ Software License ++ . ++ All Software contained in this document ("Software") is protected by copyright ++ and is being made available under the "BSD License", included below. This ++ Software may be subject to third party rights (rights from parties other than ++ Ecma International), including patent rights, and no licenses under such third ++ party rights are granted under this license even if the third party concerned is ++ a member of Ecma International. SEE THE ECMA CODE OF CONDUCT IN PATENT MATTERS ++ AVAILABLE AT http://www.ecma-international.org/memento/codeofconduct.htm FOR ++ INFORMATION REGARDING THE LICENSING OF PATENT CLAIMS THAT ARE REQUIRED TO ++ IMPLEMENT ECMA INTERNATIONAL STANDARDS*. Redistribution and use in source and ++ binary forms, with or without modification, are permitted provided that the ++ following conditions are met: ++ . ++ 1. Redistributions of source code must retain the above copyright notice, this ++ list of conditions and the following disclaimer. ++ . ++ 2. Redistributions in binary form must reproduce the above copyright notice, ++ this list of conditions and the following disclaimer in the documentation and/or ++ other materials provided with the distribution. ++ . ++ 3. Neither the name of the authors nor Ecma International may be used to endorse ++ or promote products derived from this software without specific prior written ++ permission. ++ . ++ THIS SOFTWARE IS PROVIDED BY THE ECMA INTERNATIONAL "AS IS" AND ANY EXPRESS OR ++ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ++ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT ++ SHALL ECMA INTERNATIONAL BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ++ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR ++ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING ++ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY ++ OF SUCH DAMAGE. ++ --- end of LICENSE --- ++ . ++ %% This notice is provided with respect to Dynalink library which is included ++ with the Nashorn technology. ++ . ++ --- begin of LICENSE --- ++ Copyright (c) 2009-2013, Attila Szegedi ++ . ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions are ++ met: ++ * Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ * Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in the ++ documentation and/or other materials provided with the distribution. ++ * Neither the name of the copyright holder nor the names of ++ contributors may be used to endorse or promote products derived from ++ this software without specific prior written permission. ++ .
++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS ++ IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED ++ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A ++ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER ++ BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR ++ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ++ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ++ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ++ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ --- end of LICENSE --- ++ . ++ %% This notice is provided with respect to Joni library which is included ++ with the Nashorn technology. ++ . ++ --- begin of LICENSE --- ++ Permission is hereby granted, free of charge, to any person obtaining a copy of ++ this software and associated documentation files (the "Software"), to deal in ++ the Software without restriction, including without limitation the rights to ++ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies ++ of the Software, and to permit persons to whom the Software is furnished to do ++ so, subject to the following conditions: ++ . ++ The above copyright notice and this permission notice shall be included in all ++ copies or substantial portions of the Software. ++ . ++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++ SOFTWARE. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to FontConfig 2.5, which may be ++ included with JRE 8, JDK 8, and OpenJDK 8 source distributions on ++ Linux and Solaris. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright © 2001,2003 Keith Packard ++ . ++ Permission to use, copy, modify, distribute, and sell this software and its ++ documentation for any purpose is hereby granted without fee, provided that the ++ above copyright notice appear in all copies and that both that copyright ++ notice and this permission notice appear in supporting documentation, and that ++ the name of Keith Packard not be used in advertising or publicity pertaining ++ to distribution of the software without specific, written prior permission. ++ Keith Packard makes no representations about the suitability of this software ++ for any purpose. It is provided "as is" without express or implied warranty. ++ . ++ KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ++ ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL KEITH ++ PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY ++ DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ . ++ . ++ --- end of LICENSE --- ++ . 
++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to IAIK PKCS#11 Wrapper, ++ which may be included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ IAIK PKCS#11 Wrapper License ++ . ++ Copyright (c) 2002 Graz University of Technology. All rights reserved. ++ . ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions are met: ++ . ++ 1. Redistributions of source code must retain the above copyright notice, this ++ list of conditions and the following disclaimer. ++ . ++ 2. Redistributions in binary form must reproduce the above copyright notice, ++ this list of conditions and the following disclaimer in the documentation ++ and/or other materials provided with the distribution. ++ . ++ 3. The end-user documentation included with the redistribution, if any, must ++ include the following acknowledgment: ++ . ++ "This product includes software developed by IAIK of Graz University of ++ Technology." ++ . ++ Alternately, this acknowledgment may appear in the software itself, if and ++ wherever such third-party acknowledgments normally appear. ++ . ++ 4. The names "Graz University of Technology" and "IAIK of Graz University of ++ Technology" must not be used to endorse or promote products derived from this ++ software without prior written permission. ++ . ++ 5. Products derived from this software may not be called "IAIK PKCS Wrapper", ++ nor may "IAIK" appear in their name, without prior written permission of ++ Graz University of Technology. ++ . ++ THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES, ++ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND ++ FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ++ LICENSOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, ++ OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ POSSIBILITY OF SUCH DAMAGE. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to ICU4C 4.0.1 and ICU4J 4.4, which ++ may be included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright (c) 1995-2010 International Business Machines Corporation and others ++ . ++ All rights reserved. ++ . ++ Permission is hereby granted, free of charge, to any person obtaining a copy ++ of this software and associated documentation files (the "Software"), to deal ++ in the Software without restriction, including without limitation the rights ++ to use, copy, modify, merge, publish, distribute, and/or sell copies of the ++ Software, and to permit persons to whom the Software is furnished to do so, ++ provided that the above copyright notice(s) and this permission notice appear ++ in all copies of the Software and that both the above copyright notice(s) and ++ this permission notice appear in supporting documentation. ++ . 
++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN ++ NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE ++ LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY ++ DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ . ++ Except as contained in this notice, the name of a copyright holder shall not ++ be used in advertising or otherwise to promote the sale, use or other dealings ++ in this Software without prior written authorization of the copyright holder. ++ All trademarks and registered trademarks mentioned herein are the property of ++ their respective owners. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to IJG JPEG 6b, which may be ++ included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ This software is copyright (C) 1991-1998, Thomas G. Lane. ++ All Rights Reserved except as specified below. ++ . ++ Permission is hereby granted to use, copy, modify, and distribute this ++ software (or portions thereof) for any purpose, without fee, subject to these ++ conditions: ++ (1) If any part of the source code for this software is distributed, then this ++ README file must be included, with this copyright and no-warranty notice ++ unaltered; and any additions, deletions, or changes to the original files ++ must be clearly indicated in accompanying documentation. ++ (2) If only executable code is distributed, then the accompanying ++ documentation must state that "this software is based in part on the work of ++ the Independent JPEG Group". ++ (3) Permission for use of this software is granted only if the user accepts ++ full responsibility for any undesirable consequences; the authors accept ++ NO LIABILITY for damages of any kind. ++ . ++ These conditions apply to any software derived from or based on the IJG code, ++ not just to the unmodified library. If you use our work, you ought to ++ acknowledge us. ++ . ++ Permission is NOT granted for the use of any IJG author's name or company name ++ in advertising or publicity relating to this software or products derived from ++ it. This software may be referred to only as "the Independent JPEG Group's ++ software". ++ . ++ We specifically permit and encourage the use of this software as the basis of ++ commercial products, provided that all warranty or liability claims are ++ assumed by the product vendor. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to Joni v1.1.9, which may be ++ included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . 
++ Permission is hereby granted, free of charge, to any person obtaining a copy ++ of this software and associated documentation files (the "Software"), to deal ++ in the Software without restriction, including without limitation the rights ++ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ copies of the Software, and to permit persons to whom the Software is ++ furnished to do so, subject to the following conditions: ++ . ++ The above copyright notice and this permission notice shall be included in all ++ copies or substantial portions of the Software. ++ . ++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++ SOFTWARE. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to JOpt-Simple v3.0, which may be ++ included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright (c) 2004-2009 Paul R. Holser, Jr. ++ . ++ Permission is hereby granted, free of charge, to any person obtaining ++ a copy of this software and associated documentation files (the ++ "Software"), to deal in the Software without restriction, including ++ without limitation the rights to use, copy, modify, merge, publish, ++ distribute, sublicense, and/or sell copies of the Software, and to ++ permit persons to whom the Software is furnished to do so, subject to ++ the following conditions: ++ . ++ The above copyright notice and this permission notice shall be ++ included in all copies or substantial portions of the Software. ++ . ++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE ++ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to Kerberos functionality, which ++ may be included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ (C) Copyright IBM Corp. 1999 All Rights Reserved. ++ Copyright 1997 The Open Group Research Institute. All rights reserved. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to Kerberos functionality from ++ FundsXpress, INC., which may be included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright (C) 1998 by the FundsXpress, INC. ++ . ++ All rights reserved. ++ . ++ Export of this software from the United States of America may require ++ a specific license from the United States Government.
It is the ++ responsibility of any person or organization contemplating export to ++ obtain such a license before exporting. ++ . ++ WITHIN THAT CONSTRAINT, permission to use, copy, modify, and ++ distribute this software and its documentation for any purpose and ++ without fee is hereby granted, provided that the above copyright ++ notice appear in all copies and that both that copyright notice and ++ this permission notice appear in supporting documentation, and that ++ the name of FundsXpress not be used in advertising or publicity pertaining ++ to distribution of the software without specific, written prior ++ permission. FundsXpress makes no representations about the suitability of ++ this software for any purpose. It is provided "as is" without express ++ or implied warranty. ++ . ++ THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ . ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to Khronos OpenGL headers, which may be ++ included with JDK 8 and OpenJDK 8 source distributions. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright (c) 2007 The Khronos Group Inc. ++ . ++ Permission is hereby granted, free of charge, to any person obtaining a copy ++ of this software and/or associated documentation files (the "Materials"), to ++ deal in the Materials without restriction, including without limitation the ++ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or ++ sell copies of the Materials, and to permit persons to whom the Materials are ++ furnished to do so, subject to the following conditions: ++ . ++ The above copyright notice and this permission notice shall be included in all ++ copies or substantial portions of the Materials. ++ . ++ THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE ++ MATERIALS. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------- ++ . ++ %% Lucida is a registered trademark or trademark of Bigelow & Holmes in the ++ U.S. and other countries. ++ . ++ ------------------------------------------------------------------------------- ++ . ++ %% This notice is provided with respect to Mesa 3D Graphics Library v4.1, ++ which may be included with JRE 8, JDK 8, and OpenJDK 8 source distributions. ++ . ++ --- begin of LICENSE --- ++ . ++ Mesa 3-D graphics library ++ Version: 4.1 ++ . ++ Copyright (C) 1999-2002 Brian Paul All Rights Reserved. ++ . ++ Permission is hereby granted, free of charge, to any person obtaining a ++ copy of this software and associated documentation files (the "Software"), ++ to deal in the Software without restriction, including without limitation ++ the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ and/or sell copies of the Software, and to permit persons to whom the ++ Software is furnished to do so, subject to the following conditions: ++ .
++ The above copyright notice and this permission notice shall be included ++ in all copies or substantial portions of the Software. ++ . ++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN ++ AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to Mozilla Network Security ++ Services (NSS), which is supplied with the JDK test suite in the OpenJDK ++ source code repository. It is licensed under Mozilla Public License (MPL), ++ version 2.0. ++ . ++ The NSS libraries are supplied in executable form, built from unmodified ++ NSS source code labeled with the "NSS_3.13.1_RTM" release tag. ++ . ++ The NSS source code is available in the OpenJDK source code repository at: ++ jdk/test/sun/security/pkcs11/nss/src ++ . ++ The NSS libraries are available in the OpenJDK source code repository at: ++ jdk/test/sun/security/pkcs11/nss/lib ++ . ++ --- begin of LICENSE --- ++ . ++ Mozilla Public License Version 2.0 ++ ================================== ++ . ++ 1. Definitions ++ -------------- ++ . ++ 1.1. "Contributor" ++ means each individual or legal entity that creates, contributes to ++ the creation of, or owns Covered Software. ++ . ++ 1.2. "Contributor Version" ++ means the combination of the Contributions of others (if any) used ++ by a Contributor and that particular Contributor's Contribution. ++ . ++ 1.3. "Contribution" ++ means Covered Software of a particular Contributor. ++ . ++ 1.4. "Covered Software" ++ means Source Code Form to which the initial Contributor has attached ++ the notice in Exhibit A, the Executable Form of such Source Code ++ Form, and Modifications of such Source Code Form, in each case ++ including portions thereof. ++ . ++ 1.5. "Incompatible With Secondary Licenses" ++ means ++ . ++ (a) that the initial Contributor has attached the notice described ++ in Exhibit B to the Covered Software; or ++ . ++ (b) that the Covered Software was made available under the terms of ++ version 1.1 or earlier of the License, but not also under the ++ terms of a Secondary License. ++ . ++ 1.6. "Executable Form" ++ means any form of the work other than Source Code Form. ++ . ++ 1.7. "Larger Work" ++ means a work that combines Covered Software with other material, in ++ a separate file or files, that is not Covered Software. ++ . ++ 1.8. "License" ++ means this document. ++ . ++ 1.9. "Licensable" ++ means having the right to grant, to the maximum extent possible, ++ whether at the time of the initial grant or subsequently, any and ++ all of the rights conveyed by this License. ++ . ++ 1.10. "Modifications" ++ means any of the following: ++ . ++ (a) any file in Source Code Form that results from an addition to, ++ deletion from, or modification of the contents of Covered ++ Software; or ++ . ++ (b) any new file in Source Code Form that contains any Covered ++ Software. ++ . ++ 1.11. 
"Patent Claims" of a Contributor ++ means any patent claim(s), including without limitation, method, ++ process, and apparatus claims, in any patent Licensable by such ++ Contributor that would be infringed, but for the grant of the ++ License, by the making, using, selling, offering for sale, having ++ made, import, or transfer of either its Contributions or its ++ Contributor Version. ++ . ++ 1.12. "Secondary License" ++ means either the GNU General Public License, Version 2.0, the GNU ++ Lesser General Public License, Version 2.1, the GNU Affero General ++ Public License, Version 3.0, or any later versions of those ++ licenses. ++ . ++ 1.13. "Source Code Form" ++ means the form of the work preferred for making modifications. ++ . ++ 1.14. "You" (or "Your") ++ means an individual or a legal entity exercising rights under this ++ License. For legal entities, "You" includes any entity that ++ controls, is controlled by, or is under common control with You. For ++ purposes of this definition, "control" means (a) the power, direct ++ or indirect, to cause the direction or management of such entity, ++ whether by contract or otherwise, or (b) ownership of more than ++ fifty percent (50%) of the outstanding shares or beneficial ++ ownership of such entity. ++ . ++ 2. License Grants and Conditions ++ -------------------------------- ++ . ++ 2.1. Grants ++ . ++ Each Contributor hereby grants You a world-wide, royalty-free, ++ non-exclusive license: ++ . ++ (a) under intellectual property rights (other than patent or trademark) ++ Licensable by such Contributor to use, reproduce, make available, ++ modify, display, perform, distribute, and otherwise exploit its ++ Contributions, either on an unmodified basis, with Modifications, or ++ as part of a Larger Work; and ++ . ++ (b) under Patent Claims of such Contributor to make, use, sell, offer ++ for sale, have made, import, and otherwise transfer either its ++ Contributions or its Contributor Version. ++ . ++ 2.2. Effective Date ++ . ++ The licenses granted in Section 2.1 with respect to any Contribution ++ become effective for each Contribution on the date the Contributor first ++ distributes such Contribution. ++ . ++ 2.3. Limitations on Grant Scope ++ . ++ The licenses granted in this Section 2 are the only rights granted under ++ this License. No additional rights or licenses will be implied from the ++ distribution or licensing of Covered Software under this License. ++ Notwithstanding Section 2.1(b) above, no patent license is granted by a ++ Contributor: ++ . ++ (a) for any code that a Contributor has removed from Covered Software; ++ or ++ . ++ (b) for infringements caused by: (i) Your and any other third party's ++ modifications of Covered Software, or (ii) the combination of its ++ Contributions with other software (except as part of its Contributor ++ Version); or ++ . ++ (c) under Patent Claims infringed by Covered Software in the absence of ++ its Contributions. ++ . ++ This License does not grant any rights in the trademarks, service marks, ++ or logos of any Contributor (except as may be necessary to comply with ++ the notice requirements in Section 3.4). ++ . ++ 2.4. Subsequent Licenses ++ . ++ No Contributor makes additional grants as a result of Your choice to ++ distribute the Covered Software under a subsequent version of this ++ License (see Section 10.2) or under the terms of a Secondary License (if ++ permitted under the terms of Section 3.3). ++ . ++ 2.5. Representation ++ . 
++ Each Contributor represents that the Contributor believes its ++ Contributions are its original creation(s) or it has sufficient rights ++ to grant the rights to its Contributions conveyed by this License. ++ . ++ 2.6. Fair Use ++ . ++ This License is not intended to limit any rights You have under ++ applicable copyright doctrines of fair use, fair dealing, or other ++ equivalents. ++ . ++ 2.7. Conditions ++ . ++ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted ++ in Section 2.1. ++ . ++ 3. Responsibilities ++ ------------------- ++ . ++ 3.1. Distribution of Source Form ++ . ++ All distribution of Covered Software in Source Code Form, including any ++ Modifications that You create or to which You contribute, must be under ++ the terms of this License. You must inform recipients that the Source ++ Code Form of the Covered Software is governed by the terms of this ++ License, and how they can obtain a copy of this License. You may not ++ attempt to alter or restrict the recipients' rights in the Source Code ++ Form. ++ . ++ 3.2. Distribution of Executable Form ++ . ++ If You distribute Covered Software in Executable Form then: ++ . ++ (a) such Covered Software must also be made available in Source Code ++ Form, as described in Section 3.1, and You must inform recipients of ++ the Executable Form how they can obtain a copy of such Source Code ++ Form by reasonable means in a timely manner, at a charge no more ++ than the cost of distribution to the recipient; and ++ . ++ (b) You may distribute such Executable Form under the terms of this ++ License, or sublicense it under different terms, provided that the ++ license for the Executable Form does not attempt to limit or alter ++ the recipients' rights in the Source Code Form under this License. ++ . ++ 3.3. Distribution of a Larger Work ++ . ++ You may create and distribute a Larger Work under terms of Your choice, ++ provided that You also comply with the requirements of this License for ++ the Covered Software. If the Larger Work is a combination of Covered ++ Software with a work governed by one or more Secondary Licenses, and the ++ Covered Software is not Incompatible With Secondary Licenses, this ++ License permits You to additionally distribute such Covered Software ++ under the terms of such Secondary License(s), so that the recipient of ++ the Larger Work may, at their option, further distribute the Covered ++ Software under the terms of either this License or such Secondary ++ License(s). ++ . ++ 3.4. Notices ++ . ++ You may not remove or alter the substance of any license notices ++ (including copyright notices, patent notices, disclaimers of warranty, ++ or limitations of liability) contained within the Source Code Form of ++ the Covered Software, except that You may alter any license notices to ++ the extent required to remedy known factual inaccuracies. ++ . ++ 3.5. Application of Additional Terms ++ . ++ You may choose to offer, and to charge a fee for, warranty, support, ++ indemnity or liability obligations to one or more recipients of Covered ++ Software. However, You may do so only on Your own behalf, and not on ++ behalf of any Contributor. You must make it absolutely clear that any ++ such warranty, support, indemnity, or liability obligation is offered by ++ You alone, and You hereby agree to indemnify every Contributor for any ++ liability incurred by such Contributor as a result of warranty, support, ++ indemnity or liability terms You offer. 
You may include additional ++ disclaimers of warranty and limitations of liability specific to any ++ jurisdiction. ++ . ++ 4. Inability to Comply Due to Statute or Regulation ++ --------------------------------------------------- ++ . ++ If it is impossible for You to comply with any of the terms of this ++ License with respect to some or all of the Covered Software due to ++ statute, judicial order, or regulation then You must: (a) comply with ++ the terms of this License to the maximum extent possible; and (b) ++ describe the limitations and the code they affect. Such description must ++ be placed in a text file included with all distributions of the Covered ++ Software under this License. Except to the extent prohibited by statute ++ or regulation, such description must be sufficiently detailed for a ++ recipient of ordinary skill to be able to understand it. ++ . ++ 5. Termination ++ -------------- ++ . ++ 5.1. The rights granted under this License will terminate automatically ++ if You fail to comply with any of its terms. However, if You become ++ compliant, then the rights granted under this License from a particular ++ Contributor are reinstated (a) provisionally, unless and until such ++ Contributor explicitly and finally terminates Your grants, and (b) on an ++ ongoing basis, if such Contributor fails to notify You of the ++ non-compliance by some reasonable means prior to 60 days after You have ++ come back into compliance. Moreover, Your grants from a particular ++ Contributor are reinstated on an ongoing basis if such Contributor ++ notifies You of the non-compliance by some reasonable means, this is the ++ first time You have received notice of non-compliance with this License ++ from such Contributor, and You become compliant prior to 30 days after ++ Your receipt of the notice. ++ . ++ 5.2. If You initiate litigation against any entity by asserting a patent ++ infringement claim (excluding declaratory judgment actions, ++ counter-claims, and cross-claims) alleging that a Contributor Version ++ directly or indirectly infringes any patent, then the rights granted to ++ You by any and all Contributors for the Covered Software under Section ++ 2.1 of this License shall terminate. ++ . ++ 5.3. In the event of termination under Sections 5.1 or 5.2 above, all ++ end user license agreements (excluding distributors and resellers) which ++ have been validly granted by You or Your distributors under this License ++ prior to termination shall survive termination. ++ . ++ ************************************************************************ ++ * * ++ * 6. Disclaimer of Warranty * ++ * ------------------------- * ++ * * ++ * Covered Software is provided under this License on an "as is" * ++ * basis, without warranty of any kind, either expressed, implied, or * ++ * statutory, including, without limitation, warranties that the * ++ * Covered Software is free of defects, merchantable, fit for a * ++ * particular purpose or non-infringing. The entire risk as to the * ++ * quality and performance of the Covered Software is with You. * ++ * Should any Covered Software prove defective in any respect, You * ++ * (not any Contributor) assume the cost of any necessary servicing, * ++ * repair, or correction. This disclaimer of warranty constitutes an * ++ * essential part of this License. No use of any Covered Software is * ++ * authorized under this License except under this disclaimer. * ++ * * ++ ************************************************************************ ++ . 
++ ************************************************************************ ++ * * ++ * 7. Limitation of Liability * ++ * -------------------------- * ++ * * ++ * Under no circumstances and under no legal theory, whether tort * ++ * (including negligence), contract, or otherwise, shall any * ++ * Contributor, or anyone who distributes Covered Software as * ++ * permitted above, be liable to You for any direct, indirect, * ++ * special, incidental, or consequential damages of any character * ++ * including, without limitation, damages for lost profits, loss of * ++ * goodwill, work stoppage, computer failure or malfunction, or any * ++ * and all other commercial damages or losses, even if such party * ++ * shall have been informed of the possibility of such damages. This * ++ * limitation of liability shall not apply to liability for death or * ++ * personal injury resulting from such party's negligence to the * ++ * extent applicable law prohibits such limitation. Some * ++ * jurisdictions do not allow the exclusion or limitation of * ++ * incidental or consequential damages, so this exclusion and * ++ * limitation may not apply to You. * ++ * * ++ ************************************************************************ ++ . ++ 8. Litigation ++ ------------- ++ . ++ Any litigation relating to this License may be brought only in the ++ courts of a jurisdiction where the defendant maintains its principal ++ place of business and such litigation shall be governed by laws of that ++ jurisdiction, without reference to its conflict-of-law provisions. ++ Nothing in this Section shall prevent a party's ability to bring ++ cross-claims or counter-claims. ++ . ++ 9. Miscellaneous ++ ---------------- ++ . ++ This License represents the complete agreement concerning the subject ++ matter hereof. If any provision of this License is held to be ++ unenforceable, such provision shall be reformed only to the extent ++ necessary to make it enforceable. Any law or regulation which provides ++ that the language of a contract shall be construed against the drafter ++ shall not be used to construe this License against a Contributor. ++ . ++ 10. Versions of the License ++ --------------------------- ++ . ++ 10.1. New Versions ++ . ++ Mozilla Foundation is the license steward. Except as provided in Section ++ 10.3, no one other than the license steward has the right to modify or ++ publish new versions of this License. Each version will be given a ++ distinguishing version number. ++ . ++ 10.2. Effect of New Versions ++ . ++ You may distribute the Covered Software under the terms of the version ++ of the License under which You originally received the Covered Software, ++ or under the terms of any subsequent version published by the license ++ steward. ++ . ++ 10.3. Modified Versions ++ . ++ If you create software not governed by this License, and you want to ++ create a new license for such software, you may create and use a ++ modified version of this License if you rename the license and remove ++ any references to the name of the license steward (except to note that ++ such modified license differs from this License). ++ . ++ 10.4. Distributing Source Code Form that is Incompatible With Secondary ++ Licenses ++ . ++ If You choose to distribute Source Code Form that is Incompatible With ++ Secondary Licenses under the terms of this version of the License, the ++ notice described in Exhibit B of this License must be attached. ++ . 
++ Exhibit A - Source Code Form License Notice ++ ------------------------------------------- ++ . ++ This Source Code Form is subject to the terms of the Mozilla Public ++ License, v. 2.0. If a copy of the MPL was not distributed with this ++ file, You can obtain one at http://mozilla.org/MPL/2.0/. ++ . ++ If it is not possible or desirable to put the notice in a particular ++ file, then You may include the notice in a location (such as a LICENSE ++ file in a relevant directory) where a recipient would be likely to look ++ for such a notice. ++ . ++ You may add additional accurate notices of copyright ownership. ++ . ++ Exhibit B - "Incompatible With Secondary Licenses" Notice ++ --------------------------------------------------------- ++ . ++ This Source Code Form is "Incompatible With Secondary Licenses", as ++ defined by the Mozilla Public License, v. 2.0. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to PC/SC Lite for Suse Linux v.1.1.1, ++ which may be included with JRE 8, JDK 8, and OpenJDK 8 on Linux and Solaris. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright (c) 1999-2004 David Corcoran ++ Copyright (c) 1999-2004 Ludovic Rousseau ++ All rights reserved. ++ . ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions ++ are met: ++ . ++ 1. Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ 2. Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in the ++ documentation and/or other materials provided with the distribution. ++ 3. All advertising materials mentioning features or use of this software ++ must display the following acknowledgement: ++ This product includes software developed by: ++ David Corcoran ++ http://www.linuxnet.com (MUSCLE) ++ 4. The name of the author may not be used to endorse or promote products ++ derived from this software without specific prior written permission. ++ . ++ Changes to this license can be made only by the copyright author with ++ explicit written consent. ++ . ++ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ++ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ++ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ++ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ++ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ++ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ++ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to PorterStemmer v4, which may be ++ included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ See: http://tartarus.org/~martin/PorterStemmer ++ . ++ The software is completely free for any purpose, unless notes at the head of ++ the program text indicate otherwise (which is rare).
In any case, the notes ++ about licensing are never more restrictive than the BSD License. ++ . ++ In every case where the software is not written by me (Martin Porter), this ++ licensing arrangement has been endorsed by the contributor, and it is ++ therefore unnecessary to ask the contributor again to confirm it. ++ . ++ I have not asked any contributors (or their employers, if they have them) for ++ proofs that they have the right to distribute their software in this way. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to Relax NG Object/Parser v.20050510, ++ which may be included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright (c) Kohsuke Kawaguchi ++ . ++ Permission is hereby granted, free of charge, to any person obtaining a copy ++ of this software and associated documentation files (the "Software"), to deal ++ in the Software without restriction, including without limitation the rights ++ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ copies of the Software, and to permit persons to whom the Software is ++ furnished to do so, subject to the following conditions: The above copyright ++ notice and this permission notice shall be included in all copies or ++ substantial portions of the Software. ++ . ++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++ SOFTWARE. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to RelaxNGCC v1.12, which may be ++ included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright (c) 2000-2003 Daisuke Okajima and Kohsuke Kawaguchi. ++ All rights reserved. ++ . ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions are met: ++ . ++ 1. Redistributions of source code must retain the above copyright notice, this ++ list of conditions and the following disclaimer. ++ . ++ 2. Redistributions in binary form must reproduce the above copyright notice, ++ this list of conditions and the following disclaimer in the documentation ++ and/or other materials provided with the distribution. ++ . ++ 3. The end-user documentation included with the redistribution, if any, must ++ include the following acknowledgment: ++ . ++ "This product includes software developed by Daisuke Okajima ++ and Kohsuke Kawaguchi (http://relaxngcc.sf.net/)." ++ . ++ Alternately, this acknowledgment may appear in the software itself, if and ++ wherever such third-party acknowledgments normally appear. ++ . ++ 4. The names of the copyright holders must not be used to endorse or promote ++ products derived from this software without prior written permission. For ++ written permission, please contact the copyright holders. ++ . ++ 5. 
Products derived from this software may not be called "RELAXNGCC", nor may
++ "RELAXNGCC" appear in their name, without prior written permission of the
++ copyright holders.
++ .
++ THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES,
++ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
++ FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE APACHE
++ SOFTWARE FOUNDATION OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
++ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
++ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
++ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
++ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
++ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ .
++ --- end of LICENSE ---
++ .
++ ------------------------------------------------------------------------------
++ .
++ %% This notice is provided with respect to SAX 2.0.1, which may be included
++ with JRE 8, JDK 8, and OpenJDK 8.
++ .
++ --- begin of LICENSE ---
++ .
++ SAX is free!
++ .
++ In fact, it's not possible to own a license to SAX, since it's been placed in
++ the public domain.
++ .
++ No Warranty
++ .
++ Because SAX is released to the public domain, there is no warranty for the
++ design or for the software implementation, to the extent permitted by
++ applicable law. Except when otherwise stated in writing the copyright holders
++ and/or other parties provide SAX "as is" without warranty of any kind, either
++ expressed or implied, including, but not limited to, the implied warranties
++ of merchantability and fitness for a particular purpose. The entire risk as
++ to the quality and performance of SAX is with you. Should SAX prove
++ defective, you assume the cost of all necessary servicing, repair or
++ correction.
++ .
++ In no event unless required by applicable law or agreed to in writing will
++ any copyright holder, or any other party who may modify and/or redistribute
++ SAX, be liable to you for damages, including any general, special, incidental
++ or consequential damages arising out of the use or inability to use SAX
++ (including but not limited to loss of data or data being rendered inaccurate
++ or losses sustained by you or third parties or a failure of the SAX to
++ operate with any other programs), even if such holder or other party has been
++ advised of the possibility of such damages.
++ .
++ Copyright Disclaimers
++ .
++ This page includes statements to that effect by David Megginson, who would
++ have been able to claim copyright for the original work.
++ .
++ SAX 1.0
++ .
++ Version 1.0 of the Simple API for XML (SAX), created collectively by the
++ membership of the XML-DEV mailing list, is hereby released into the public
++ domain.
++ .
++ No one owns SAX: you may use it freely in both commercial and non-commercial
++ applications, bundle it with your software distribution, include it on a
++ CD-ROM, list the source code in a book, mirror the documentation at your own
++ web site, or use it in any other way you see fit.
++ .
++ David Megginson, sax@megginson.com
++ 1998-05-11
++ .
++ SAX 2.0
++ .
++ I hereby abandon any property rights to SAX 2.0 (the Simple API for XML), and
++ release all of the SAX 2.0 source code, compiled code, and documentation
++ contained in this distribution into the Public Domain.
SAX comes with NO ++ WARRANTY or guarantee of fitness for any purpose. ++ . ++ David Megginson, david@megginson.com ++ 2000-05-05 ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to SoftFloat version 2b, which may be ++ included with JRE 8, JDK 8, and OpenJDK 8 on Linux/ARM. ++ . ++ --- begin of LICENSE --- ++ . ++ Use of any of this software is governed by the terms of the license below: ++ . ++ SoftFloat was written by me, John R. Hauser. This work was made possible in ++ part by the International Computer Science Institute, located at Suite 600, ++ 1947 Center Street, Berkeley, California 94704. Funding was partially ++ provided by the National Science Foundation under grant MIP-9311980. The ++ original version of this code was written as part of a project to build ++ a fixed-point vector processor in collaboration with the University of ++ California at Berkeley, overseen by Profs. Nelson Morgan and John Wawrzynek. ++ . ++ THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort ++ has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT ++ TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO ++ PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL ++ LOSSES, COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO ++ FURTHERMORE EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER ++ SCIENCE INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, ++ COSTS, OR OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE ++ SOFTWARE. ++ . ++ Derivative works are acceptable, even for commercial purposes, provided ++ that the minimal documentation requirements stated in the source code are ++ satisfied. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to Sparkle 1.5, ++ which may be included with JRE 8 on Mac OS X. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright (c) 2012 Sparkle.org and Andy Matuschak ++ . ++ Permission is hereby granted, free of charge, to any person obtaining a copy ++ of this software and associated documentation files (the "Software"), to deal ++ in the Software without restriction, including without limitation the rights ++ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ copies of the Software, and to permit persons to whom the Software is ++ furnished to do so, subject to the following conditions: ++ . ++ The above copyright notice and this permission notice shall be included in all ++ copies or substantial portions of the Software. ++ . ++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++ SOFTWARE. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% Portions licensed from Taligent, Inc. ++ . ++ ------------------------------------------------------------------------------ ++ . 
++ %% This notice is provided with respect to Thai Dictionary, which may be ++ included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ Copyright (C) 1982 The Royal Institute, Thai Royal Government. ++ . ++ Copyright (C) 1998 National Electronics and Computer Technology Center, ++ National Science and Technology Development Agency, ++ Ministry of Science Technology and Environment, ++ Thai Royal Government. ++ . ++ Permission is hereby granted, free of charge, to any person obtaining a copy ++ of this software and associated documentation files (the "Software"), to deal ++ in the Software without restriction, including without limitation the rights ++ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ copies of the Software, and to permit persons to whom the Software is ++ furnished to do so, subject to the following conditions: ++ . ++ The above copyright notice and this permission notice shall be included in all ++ copies or substantial portions of the Software. ++ . ++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++ SOFTWARE. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to Unicode 6.2.0 & CLDR 21.0.1 ++ which may be included with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ Unicode Terms of Use ++ . ++ For the general privacy policy governing access to this site, see the Unicode ++ Privacy Policy. For trademark usage, see the Unicode® Consortium Name and ++ Trademark Usage Policy. ++ . ++ A. Unicode Copyright. ++ 1. Copyright © 1991-2013 Unicode, Inc. All rights reserved. ++ . ++ 2. Certain documents and files on this website contain a legend indicating ++ that "Modification is permitted." Any person is hereby authorized, ++ without fee, to modify such documents and files to create derivative ++ works conforming to the Unicode® Standard, subject to Terms and ++ Conditions herein. ++ . ++ 3. Any person is hereby authorized, without fee, to view, use, reproduce, ++ and distribute all documents and files solely for informational ++ purposes in the creation of products supporting the Unicode Standard, ++ subject to the Terms and Conditions herein. ++ . ++ 4. Further specifications of rights and restrictions pertaining to the use ++ of the particular set of data files known as the "Unicode Character ++ Database" can be found in Exhibit 1. ++ . ++ 5. Each version of the Unicode Standard has further specifications of ++ rights and restrictions of use. For the book editions (Unicode 5.0 and ++ earlier), these are found on the back of the title page. The online ++ code charts carry specific restrictions. All other files, including ++ online documentation of the core specification for Unicode 6.0 and ++ later, are covered under these general Terms of Use. ++ . ++ 6. No license is granted to "mirror" the Unicode website where a fee is ++ charged for access to the "mirror" site. ++ . ++ 7. Modification is not permitted with respect to this document. 
All copies
++ of this document must be verbatim.
++ .
++ B. Restricted Rights Legend. Any technical data or software which is licensed
++ to the United States of America, its agencies and/or instrumentalities
++ under this Agreement is commercial technical data or commercial computer
++ software developed exclusively at private expense as defined in FAR 2.101,
++ or DFARS 252.227-7014 (June 1995), as applicable. For technical data, use,
++ duplication, or disclosure by the Government is subject to restrictions as
++ set forth in DFARS 202.227-7015 Technical Data, Commercial and Items (Nov
++ 1995) and this Agreement. For Software, in accordance with FAR 12-212 or
++ DFARS 227-7202, as applicable, use, duplication or disclosure by the
++ Government is subject to the restrictions set forth in this Agreement.
++ .
++ C. Warranties and Disclaimers.
++ 1. This publication and/or website may include technical or typographical
++ errors or other inaccuracies. Changes are periodically added to the
++ information herein; these changes will be incorporated in new editions
++ of the publication and/or website. Unicode may make improvements and/or
++ changes in the product(s) and/or program(s) described in this
++ publication and/or website at any time.
++ .
++ 2. If this file has been purchased on magnetic or optical media from
++ Unicode, Inc. the sole and exclusive remedy for any claim will be
++ exchange of the defective media within ninety (90) days of original
++ purchase.
++ .
++ 3. EXCEPT AS PROVIDED IN SECTION C.2, THIS PUBLICATION AND/OR SOFTWARE IS
++ PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND EITHER EXPRESS, IMPLIED,
++ OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES OF
++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT.
++ UNICODE AND ITS LICENSORS ASSUME NO RESPONSIBILITY FOR ERRORS OR
++ OMISSIONS IN THIS PUBLICATION AND/OR SOFTWARE OR OTHER DOCUMENTS WHICH
++ ARE REFERENCED BY OR LINKED TO THIS PUBLICATION OR THE UNICODE WEBSITE.
++ .
++ D. Waiver of Damages. In no event shall Unicode or its licensors be liable for
++ any special, incidental, indirect or consequential damages of any kind, or
++ any damages whatsoever, whether or not Unicode was advised of the
++ possibility of the damage, including, without limitation, those resulting
++ from the following: loss of use, data or profits, in connection with the
++ use, modification or distribution of this information or its derivatives.
++ .
++ E. Trademarks & Logos.
++ 1. The Unicode Word Mark and the Unicode Logo are trademarks of Unicode,
++ Inc. “The Unicode Consortium” and “Unicode, Inc.” are trade names of
++ Unicode, Inc. Use of the information and materials found on this
++ website indicates your acknowledgement of Unicode, Inc.’s exclusive
++ worldwide rights in the Unicode Word Mark, the Unicode Logo, and the
++ Unicode trade names.
++ .
++ 2. The Unicode Consortium Name and Trademark Usage Policy (“Trademark
++ Policy”) are incorporated herein by reference and you agree to abide by
++ the provisions of the Trademark Policy, which may be changed from time
++ to time in the sole discretion of Unicode, Inc.
++ .
++ 3. All third party trademarks referenced herein are the property of their
++ respective owners.
++ .
++ Miscellaneous.
++ 1. Jurisdiction and Venue. This server is operated from a location in the
++ State of California, United States of America. Unicode makes no
++ representation that the materials are appropriate for use in other
++ locations.
If you access this server from other locations, you are ++ responsible for compliance with local laws. This Agreement, all use of ++ this site and any claims and damages resulting from use of this site are ++ governed solely by the laws of the State of California without regard to ++ any principles which would apply the laws of a different jurisdiction. ++ The user agrees that any disputes regarding this site shall be resolved ++ solely in the courts located in Santa Clara County, California. The user ++ agrees said courts have personal jurisdiction and agree to waive any ++ right to transfer the dispute to any other forum. ++ . ++ 2. Modification by Unicode. Unicode shall have the right to modify this ++ Agreement at any time by posting it to this site. The user may not ++ assign any part of this Agreement without Unicode’s prior written ++ consent. ++ . ++ 3. Taxes. The user agrees to pay any taxes arising from access to this ++ website or use of the information herein, except for those based on ++ Unicode’s net income. ++ . ++ 4. Severability. If any provision of this Agreement is declared invalid or ++ unenforceable, the remaining provisions of this Agreement shall remain ++ in effect. ++ . ++ 5. Entire Agreement. This Agreement constitutes the entire agreement ++ between the parties. ++ . ++ EXHIBIT 1 ++ UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE ++ . ++ Unicode Data Files include all data files under the directories ++ http://www.unicode.org/Public/, http://www.unicode.org/reports/, and ++ http://www.unicode.org/cldr/data/. Unicode Data Files do not include PDF ++ online code charts under the directory http://www.unicode.org/Public/. ++ Software includes any source code published in the Unicode Standard or under ++ the directories http://www.unicode.org/Public/, ++ http://www.unicode.org/reports/, and http://www.unicode.org/cldr/data/. ++ . ++ NOTICE TO USER: Carefully read the following legal agreement. BY DOWNLOADING, ++ INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S DATA FILES ("DATA ++ FILES"), AND/OR SOFTWARE ("SOFTWARE"), YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO ++ BE BOUND BY, ALL OF THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT ++ AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE THE DATA FILES OR ++ SOFTWARE. ++ . ++ COPYRIGHT AND PERMISSION NOTICE ++ . ++ Copyright © 1991-2012 Unicode, Inc. All rights reserved. Distributed under the ++ Terms of Use in http://www.unicode.org/copyright.html. ++ . ++ Permission is hereby granted, free of charge, to any person obtaining a copy ++ of the Unicode data files and any associated documentation (the "Data Files") ++ or Unicode software and any associated documentation (the "Software") to deal ++ in the Data Files or Software without restriction, including without ++ limitation the rights to use, copy, modify, merge, publish, distribute, and/or ++ sell copies of the Data Files or Software, and to permit persons to whom the ++ Data Files or Software are furnished to do so, provided that (a) the above ++ copyright notice(s) and this permission notice appear with all copies of the ++ Data Files or Software, (b) both the above copyright notice(s) and this ++ permission notice appear in associated documentation, and (c) there is clear ++ notice in each modified Data File or in the Software as well as in the ++ documentation associated with the Data File(s) or Software that the data or ++ software has been modified. ++ . 
++ THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY ++ KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD ++ PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN ++ THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL ++ DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR ++ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ++ ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THE ++ DATA FILES OR SOFTWARE. ++ . ++ Except as contained in this notice, the name of a copyright holder shall not ++ be used in advertising or otherwise to promote the sale, use or other dealings ++ in these Data Files or Software without prior written authorization of the ++ copyright holder. ++ . ++ Unicode and the Unicode logo are trademarks of Unicode, Inc. in the United ++ States and other countries. All third party trademarks referenced herein are ++ the property of their respective owners. ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to UPX v3.01, which may be included ++ with JRE 8 on Windows. ++ . ++ --- begin of LICENSE --- ++ . ++ Use of any of this software is governed by the terms of the license below: ++ . ++ . ++ ooooo ooo ooooooooo. ooooooo ooooo ++ `888' `8' `888 `Y88. `8888 d8' ++ 888 8 888 .d88' Y888..8P ++ 888 8 888ooo88P' `8888' ++ 888 8 888 .8PY888. ++ `88. .8' 888 d8' `888b ++ `YbodP' o888o o888o o88888o ++ . ++ . ++ The Ultimate Packer for eXecutables ++ Copyright (c) 1996-2000 Markus Oberhumer & Laszlo Molnar ++ http://wildsau.idv.uni-linz.ac.at/mfx/upx.html ++ http://www.nexus.hu/upx ++ http://upx.tsx.org ++ . ++ . ++ PLEASE CAREFULLY READ THIS LICENSE AGREEMENT, ESPECIALLY IF YOU PLAN ++ TO MODIFY THE UPX SOURCE CODE OR USE A MODIFIED UPX VERSION. ++ . ++ . ++ ABSTRACT ++ ======== ++ . ++ UPX and UCL are copyrighted software distributed under the terms ++ of the GNU General Public License (hereinafter the "GPL"). ++ . ++ The stub which is imbedded in each UPX compressed program is part ++ of UPX and UCL, and contains code that is under our copyright. The ++ terms of the GNU General Public License still apply as compressing ++ a program is a special form of linking with our stub. ++ . ++ As a special exception we grant the free usage of UPX for all ++ executables, including commercial programs. ++ See below for details and restrictions. ++ . ++ . ++ COPYRIGHT ++ ========= ++ . ++ UPX and UCL are copyrighted software. All rights remain with the authors. ++ . ++ UPX is Copyright (C) 1996-2000 Markus Franz Xaver Johannes Oberhumer ++ UPX is Copyright (C) 1996-2000 Laszlo Molnar ++ . ++ UCL is Copyright (C) 1996-2000 Markus Franz Xaver Johannes Oberhumer ++ . ++ . ++ GNU GENERAL PUBLIC LICENSE ++ ========================== ++ . ++ UPX and the UCL library are free software; you can redistribute them ++ and/or modify them under the terms of the GNU General Public License as ++ published by the Free Software Foundation; either version 2 of ++ the License, or (at your option) any later version. ++ . ++ UPX and UCL are distributed in the hope that they will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ GNU General Public License for more details. 
++
++ .
++ You should have received a copy of the GNU General Public License
++ along with this program; see the file COPYING.
++ .
++ .
++ SPECIAL EXCEPTION FOR COMPRESSED EXECUTABLES
++ ============================================
++ .
++ The stub which is imbedded in each UPX compressed program is part
++ of UPX and UCL, and contains code that is under our copyright. The
++ terms of the GNU General Public License still apply as compressing
++ a program is a special form of linking with our stub.
++ .
++ Hereby Markus F.X.J. Oberhumer and Laszlo Molnar grant you special
++ permission to freely use and distribute all UPX compressed programs
++ (including commercial ones), subject to the following restrictions:
++ .
++ 1. You must compress your program with a completely unmodified UPX
++ version; either with our precompiled version, or (at your option)
++ with a self compiled version of the unmodified UPX sources as
++ distributed by us.
++ 2. This also implies that the UPX stub must be completely unmodified, i.e.
++ the stub imbedded in your compressed program must be byte-identical
++ to the stub that is produced by the official unmodified UPX version.
++ 3. The decompressor and any other code from the stub must exclusively get
++ used by the unmodified UPX stub for decompressing your program at
++ program startup. No portion of the stub may get read, copied,
++ called or otherwise get used or accessed by your program.
++ .
++ .
++ ANNOTATIONS
++ ===========
++ .
++ - You can use a modified UPX version or modified UPX stub only for
++ programs that are compatible with the GNU General Public License.
++ .
++ - We grant you special permission to freely use and distribute all UPX
++ compressed programs. But any modification of the UPX stub (such as,
++ but not limited to, removing our copyright string or making your
++ program non-decompressible) will immediately revoke your right to
++ use and distribute a UPX compressed program.
++ .
++ - UPX is not a software protection tool; by requiring that you use
++ the unmodified UPX version for your proprietary programs we
++ make sure that any user can decompress your program. This protects
++ both you and your users as nobody can hide malicious code -
++ any program that cannot be decompressed is highly suspicious
++ by definition.
++ .
++ - You can integrate all or part of UPX and UCL into projects that
++ are compatible with the GNU GPL, but obviously you cannot grant
++ any special exceptions beyond the GPL for our code in your project.
++ .
++ - We want to actively support manufacturers of virus scanners and
++ similar security software. Please contact us if you would like to
++ incorporate parts of UPX or UCL into such a product.
++ .
++ .
++ .
++ Markus F.X.J. Oberhumer Laszlo Molnar
++ markus.oberhumer@jk.uni-linz.ac.at ml1050@cdata.tvnet.hu
++ .
++ Linz, Austria, 25 Feb 2000
++ .
++ Additional License(s)
++ .
++ The UPX license file is at http://upx.sourceforge.net/upx-license.html.
++ .
++ --- end of LICENSE ---
++ .
++ ------------------------------------------------------------------------------
++ .
++ %% This notice is provided with respect to Xfree86-VidMode Extension 1.0,
++ which may be included with JRE 8, JDK 8, and OpenJDK 8 on Linux and Solaris.
++ .
++ --- begin of LICENSE ---
++ .
++ Version 1.1 of the XFree86 Project Licence.
++ .
++ Copyright (C) 1994-2004 The XFree86 Project, Inc. All rights reserved.
++ .
++
++ Permission is hereby granted, free of charge, to any person obtaining a copy of
++ this software and associated documentation files (the "Software"), to deal
++ in the Software without restriction, including without limitation the rights
++ to use, copy, modify, merge, publish, distribute, sublicence, and/or sell
++ copies of the Software, and to permit persons to whom the Software is furnished
++ to do so, subject to the following conditions:
++ .
++ 1. Redistributions of source code must retain the above copyright
++ notice, this list of conditions, and the following disclaimer.
++ .
++ 2. Redistributions in binary form must reproduce the above copyright notice,
++ this list of conditions and the following disclaimer in the documentation
++ and/or other materials provided with the distribution, and in the same place
++ and form as other copyright, license and disclaimer information.
++ .
++ 3. The end-user documentation included with the redistribution, if any, must
++ include the following acknowledgment: "This product includes
++ software developed by The XFree86 Project, Inc (http://www.xfree86.org/) and
++ its contributors", in the same place and form as other third-party
++ acknowledgments. Alternately, this acknowledgment may appear in the software
++ itself, in the same form and location as other such third-party
++ acknowledgments.
++ .
++ 4. Except as contained in this notice, the name of The XFree86 Project, Inc
++ shall not be used in advertising or otherwise to promote the sale, use
++ or other dealings in this Software without prior written authorization from
++ The XFree86 Project, Inc.
++ .
++ THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
++ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
++ EVENT SHALL THE XFREE86 PROJECT, INC OR ITS CONTRIBUTORS BE LIABLE FOR ANY
++ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ DAMAGE.
++ .
++ --- end of LICENSE ---
++ .
++ ------------------------------------------------------------------------------
++ .
++ %% This notice is provided with respect to X Window System 6.8.2, which may be
++ included with JRE 8, JDK 8, and OpenJDK 8 on Linux and Solaris.
++ .
++ --- begin of LICENSE ---
++ .
++ Licenses
++ The X.Org Foundation March 2004
++ .
++ 1. Introduction
++ .
++ The X.org Foundation X Window System distribution is a compilation of code and
++ documentation from many sources. This document is intended primarily as a
++ guide to the licenses used in the distribution: you must check each file
++ and/or package for precise redistribution terms. Nonetheless, this summary
++ may be useful to many users. No software incorporating the XFree86 1.1 license
++ has been incorporated.
++ .
++ This document is based on the compilation from XFree86.
++ .
++ 2. XFree86 License
++ .
++ XFree86 code without an explicit copyright is covered by the following
++ copyright/license:
++ .
++ Copyright (C) 1994-2003 The XFree86 Project, Inc. All Rights Reserved.
++ .
++ Permission is hereby granted, free of charge, to any person obtaining a copy ++ of this software and associated documentation files (the "Software"), to deal ++ in the Software without restriction, including without limitation the rights ++ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ copies of the Software, and to permit persons to whom the Software is ++ furnished to do so, subject to the following conditions: ++ . ++ The above copyright notice and this permission notice shall be included in all ++ copies or substantial portions of the Software. ++ . ++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ XFREE86 PROJECT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ . ++ Except as contained in this notice, the name of the XFree86 Project shall not ++ be used in advertising or otherwise to promote the sale, use or other dealings ++ in this Software without prior written authorization from the XFree86 Project. ++ . ++ 3. Other Licenses ++ . ++ Portions of code are covered by the following licenses/copyrights. See ++ individual files for the copyright dates. ++ . ++ 3.1. X/MIT Copyrights ++ . ++ 3.1.1. X Consortium ++ . ++ Copyright (C) X Consortium ++ . ++ Permission is hereby granted, free of charge, to any person obtaining a copy ++ of this software and associated documentation files (the "Software"), to deal ++ in the Software without restriction, including without limitation the rights ++ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ copies of the Software, and to permit persons to whom the Software is ++ furnished to do so, subject to the following conditions: ++ . ++ The above copyright notice and this permission notice shall be included in all ++ copies or substantial portions of the Software. ++ . ++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE X ++ CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ++ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ . ++ Except as contained in this notice, the name of the X Consortium shall not be ++ used in advertising or otherwise to promote the sale, use or other dealings in ++ this Software without prior written authorization from the X Consortium. ++ . ++ X Window System is a trademark of X Consortium, Inc. ++ . ++ 3.1.2. The Open Group ++ . ++ Copyright The Open Group ++ . ++ Permission to use, copy, modify, distribute, and sell this software and its ++ documentation for any purpose is hereby granted without fee, provided that the ++ above copyright notice appear in all copies and that both that copyright ++ notice and this permission notice appear in supporting documentation. ++ . ++ The above copyright notice and this permission notice shall be included in all ++ copies or substantial portions of the Software. ++ . 
++
++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ .
++ Except as contained in this notice, the name of The Open Group shall not be
++ used in advertising or otherwise to promote the sale, use or other dealings in
++ this Software without prior written authorization from The Open Group.
++ .
++ 3.2. Berkeley-based copyrights:
++ .
++ 3.2.1. General
++ .
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions are met:
++ .
++ 1. Redistributions of source code must retain the above copyright notice,
++ this list of conditions and the following disclaimer.
++ .
++ 2. Redistributions in binary form must reproduce the above copyright
++ notice, this list of conditions and the following disclaimer in the
++ documentation and/or other materials provided with the distribution.
++ .
++ 3. The name of the author may not be used to endorse or promote products
++ derived from this software without specific prior written permission.
++ .
++ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
++ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
++ EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
++ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
++ IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ POSSIBILITY OF SUCH DAMAGE.
++ .
++ 3.2.2. UCB/LBL
++ .
++ Copyright (c) 1993 The Regents of the University of California. All rights
++ reserved.
++ .
++ This software was developed by the Computer Systems Engineering group at
++ Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and contributed to
++ Berkeley.
++ .
++ All advertising materials mentioning features or use of this software must
++ display the following acknowledgement: This product includes software
++ developed by the University of California, Lawrence Berkeley Laboratory.
++ .
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions are met:
++ .
++ 1. Redistributions of source code must retain the above copyright notice,
++ this list of conditions and the following disclaimer.
++ .
++ 2. Redistributions in binary form must reproduce the above copyright
++ notice, this list of conditions and the following disclaimer in the
++ documentation and/or other materials provided with the distribution.
++ .
++ 3. All advertising materials mentioning features or use of this software
++ must display the following acknowledgement: This product includes software
++ developed by the University of California, Berkeley and its contributors.
++ .
++ 4.
Neither the name of the University nor the names of its contributors may
++ be used to endorse or promote products derived from this software without
++ specific prior written permission.
++ .
++ THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
++ EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
++ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
++ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ .
++ 3.2.3. The NetBSD Foundation, Inc.
++ .
++ Copyright (c) 2003 The NetBSD Foundation, Inc. All rights reserved.
++ .
++ This code is derived from software contributed to The NetBSD Foundation by Ben
++ Collver.
++ .
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions are met:
++ .
++ 1. Redistributions of source code must retain the above copyright notice,
++ this list of conditions and the following disclaimer.
++ .
++ 2. Redistributions in binary form must reproduce the above copyright
++ notice, this list of conditions and the following disclaimer in the
++ documentation and/or other materials provided with the distribution.
++ .
++ 3. All advertising materials mentioning features or use of this software
++ must display the following acknowledgement: This product includes software
++ developed by the NetBSD Foundation, Inc. and its contributors.
++ .
++ 4. Neither the name of The NetBSD Foundation nor the names of its
++ contributors may be used to endorse or promote products derived from this
++ software without specific prior written permission.
++ .
++ THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS ``AS
++ IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS BE LIABLE FOR ANY
++ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
++ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ .
++ 3.2.4. Theodore Ts'o.
++ .
++ Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights
++ reserved.
++ .
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions are met:
++ .
++ 1. Redistributions of source code must retain the above copyright notice,
++ and the entire permission notice in its entirety, including the disclaimer
++ of warranties.
++ .
++ 2.
Redistributions in binary form must reproduce the above copyright
++ notice, this list of conditions and the following disclaimer in the
++ documentation and/or other materials provided with the distribution.
++ .
++ 3. The name of the author may not be used to endorse or promote products
++ derived from this software without specific prior written permission.
++ .
++ THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
++ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
++ FITNESS FOR A PARTICULAR PURPOSE, ALL OF WHICH ARE HEREBY DISCLAIMED. IN NO
++ EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
++ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
++ IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE
++ POSSIBILITY OF SUCH DAMAGE.
++ .
++ 3.2.5. Theo de Raadt and Damien Miller
++ .
++ Copyright (c) 1995,1999 Theo de Raadt. All rights reserved. Copyright (c)
++ 2001-2002 Damien Miller. All rights reserved.
++ .
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions are met:
++ .
++ 1. Redistributions of source code must retain the above copyright notice,
++ this list of conditions and the following disclaimer.
++ .
++ 2. Redistributions in binary form must reproduce the above copyright
++ notice, this list of conditions and the following disclaimer in the
++ documentation and/or other materials provided with the distribution.
++ .
++ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
++ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
++ EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
++ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
++ IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ POSSIBILITY OF SUCH DAMAGE.
++ .
++ 3.2.6. Todd C. Miller
++ .
++ Copyright (c) 1998 Todd C. Miller
++ .
++ Permission to use, copy, modify, and distribute this software for any purpose
++ with or without fee is hereby granted, provided that the above copyright
++ notice and this permission notice appear in all copies.
++ .
++ THE SOFTWARE IS PROVIDED "AS IS" AND TODD C. MILLER DISCLAIMS ALL WARRANTIES
++ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL TODD C. MILLER BE LIABLE FOR
++ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
++ OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
++ CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ .
++ 3.2.7. Thomas Winischhofer
++ .
++ Copyright (C) 2001-2004 Thomas Winischhofer
++ .
++
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions are met:
++ .
++ 1. Redistributions of source code must retain the above copyright notice,
++ this list of conditions and the following disclaimer.
++ .
++ 2. Redistributions in binary form must reproduce the above copyright
++ notice, this list of conditions and the following disclaimer in the
++ documentation and/or other materials provided with the distribution.
++ .
++ 3. The name of the author may not be used to endorse or promote products
++ derived from this software without specific prior written permission.
++ .
++ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESSED OR IMPLIED
++ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
++ EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
++ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
++ IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ POSSIBILITY OF SUCH DAMAGE.
++ .
++ 3.3. NVIDIA Corp
++ .
++ Copyright (c) 1996 NVIDIA, Corp. All rights reserved.
++ .
++ NOTICE TO USER: The source code is copyrighted under U.S. and international
++ laws. NVIDIA, Corp. of Sunnyvale, California owns the copyright and has design
++ patents pending on the design and interface of the NV chips. Users and
++ possessors of this source code are hereby granted a nonexclusive, royalty-free
++ copyright and design patent license to use this code in individual and
++ commercial software.
++ .
++ Any use of this source code must include, in the user documentation and
++ internal comments to the code, notices to the end user as follows:
++ .
++ Copyright (c) 1996 NVIDIA, Corp. NVIDIA design patents pending in the U.S. and
++ foreign countries.
++ .
++ NVIDIA, CORP. MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
++ CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED
++ WARRANTY OF ANY KIND. NVIDIA, CORP. DISCLAIMS ALL WARRANTIES WITH REGARD TO
++ THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
++ FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA, CORP. BE LIABLE
++ FOR ANY SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
++ DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
++ CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE.
++ .
++ 3.4. GLX Public License
++ .
++ GLX PUBLIC LICENSE (Version 1.0 (2/11/99)) ("License")
++ .
++ Subject to any third party claims, Silicon Graphics, Inc.
("SGI") hereby ++ grants permission to Recipient (defined below), under Recipient's copyrights ++ in the Original Software (defined below), to use, copy, modify, merge, ++ publish, distribute, sublicense and/or sell copies of Subject Software ++ (defined below), and to permit persons to whom the Subject Software is ++ furnished in accordance with this License to do the same, subject to all of ++ the following terms and conditions, which Recipient accepts by engaging in any ++ such use, copying, modifying, merging, publishing, distributing, sublicensing ++ or selling: ++ . ++ 1. Definitions. ++ . ++ (a) "Original Software" means source code of computer software code which ++ is described in Exhibit A as Original Software. ++ . ++ (b) "Modifications" means any addition to or deletion from the substance ++ or structure of either the Original Software or any previous ++ Modifications. When Subject Software is released as a series of files, a ++ Modification means (i) any addition to or deletion from the contents of a ++ file containing Original Software or previous Modifications and (ii) any ++ new file that contains any part of the Original Code or previous ++ Modifications. ++ . ++ (c) "Subject Software" means the Original Software or Modifications or the ++ combination of the Original Software and Modifications, or portions of any ++ of the foregoing. ++ . ++ (d) "Recipient" means an individual or a legal entity exercising rights ++ under, and complying with all of the terms of, this License. For legal ++ entities, "Recipient" includes any entity which controls, is controlled ++ by, or is under common control with Recipient. For purposes of this ++ definition, "control" of an entity means (a) the power, direct or ++ indirect, to direct or manage such entity, or (b) ownership of fifty ++ percent (50%) or more of the outstanding shares or beneficial ownership of ++ such entity. ++ . ++ 2. Redistribution of Source Code Subject to These Terms. Redistributions of ++ Subject Software in source code form must retain the notice set forth in ++ Exhibit A, below, in every file. A copy of this License must be included in ++ any documentation for such Subject Software where the recipients' rights ++ relating to Subject Software are described. Recipient may distribute the ++ source code version of Subject Software under a license of Recipient's choice, ++ which may contain terms different from this License, provided that (i) ++ Recipient is in compliance with the terms of this License, and (ii) the ++ license terms include this Section 2 and Sections 3, 4, 7, 8, 10, 12 and 13 of ++ this License, which terms may not be modified or superseded by any other terms ++ of such license. If Recipient distributes the source code version under a ++ different license Recipient must make it absolutely clear that any terms which ++ differ from this License are offered by Recipient alone, not by SGI. Recipient ++ hereby agrees to indemnify SGI for any liability incurred by SGI as a result ++ of any such terms Recipient offers. ++ . ++ 3. Redistribution in Executable Form. The notice set forth in Exhibit A must ++ be conspicuously included in any notice in an executable version of Subject ++ Software, related documentation or collateral in which Recipient describes the ++ user's rights relating to the Subject Software. 
Recipient may distribute the ++ executable version of Subject Software under a license of Recipient's choice, ++ which may contain terms different from this License, provided that (i) ++ Recipient is in compliance with the terms of this License, and (ii) the ++ license terms include this Section 3 and Sections 4, 7, 8, 10, 12 and 13 of ++ this License, which terms may not be modified or superseded by any other terms ++ of such license. If Recipient distributes the executable version under a ++ different license Recipient must make it absolutely clear that any terms which ++ differ from this License are offered by Recipient alone, not by SGI. Recipient ++ hereby agrees to indemnify SGI for any liability incurred by SGI as a result ++ of any such terms Recipient offers. ++ . ++ 4. Termination. This License and the rights granted hereunder will terminate ++ automatically if Recipient fails to comply with terms herein and fails to cure ++ such breach within 30 days of the breach. Any sublicense to the Subject ++ Software which is properly granted shall survive any termination of this ++ License absent termination by the terms of such sublicense. Provisions which, ++ by their nature, must remain in effect beyond the termination of this License ++ shall survive. ++ . ++ 5. No Trademark Rights. This License does not grant any rights to use any ++ trade name, trademark or service mark whatsoever. No trade name, trademark or ++ service mark of SGI may be used to endorse or promote products derived from ++ the Subject Software without prior written permission of SGI. ++ . ++ 6. No Other Rights. This License does not grant any rights with respect to the ++ OpenGL API or to any software or hardware implementation thereof or to any ++ other software whatsoever, nor shall any other rights or licenses not ++ expressly granted hereunder arise by implication, estoppel or otherwise with ++ respect to the Subject Software. Title to and ownership of the Original ++ Software at all times remains with SGI. All rights in the Original Software ++ not expressly granted under this License are reserved. ++ . ++ 7. Compliance with Laws; Non-Infringement. Recipient shall comply with all ++ applicable laws and regulations in connection with use and distribution of the ++ Subject Software, including but not limited to, all export and import control ++ laws and regulations of the U.S. government and other countries. Recipient may ++ not distribute Subject Software that (i) in any way infringes (directly or ++ contributorily) the rights (including patent, copyright, trade secret, ++ trademark or other intellectual property rights of any kind) of any other ++ person or entity or (ii) breaches any representation or warranty, express, ++ implied or statutory, which under any applicable law it might be deemed to ++ have been distributed. ++ . ++ 8. Claims of Infringement. If Recipient at any time has knowledge of any one ++ or more third party claims that reproduction, modification, use, distribution, ++ import or sale of Subject Software (including particular functionality or code ++ incorporated in Subject Software) infringes the third party's intellectual ++ property rights, Recipient must place in a well-identified web page bearing ++ the title "LEGAL" a description of each such claim and a description of the ++ party making each such claim in sufficient detail that a user of the Subject ++ Software will know whom to contact regarding the claim. 
Also, upon gaining
++ such knowledge of any such claim, Recipient must conspicuously include the URL
++ for such web page in the Exhibit A notice required under Sections 2 and 3,
++ above, and in the text of any related documentation, license agreement or
++ collateral in which Recipient describes end user's rights relating to the
++ Subject Software. If Recipient obtains such knowledge after it makes Subject
++ Software available to any other person or entity, Recipient shall take other
++ steps (such as notifying appropriate mailing lists or newsgroups) reasonably
++ calculated to inform those who received the Subject Software that new
++ knowledge has been obtained.
++ .
++ 9. DISCLAIMER OF WARRANTY. SUBJECT SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
++ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT
++ LIMITATION, WARRANTIES THAT THE SUBJECT SOFTWARE IS FREE OF DEFECTS,
++ MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. SGI ASSUMES NO
++ RISK AS TO THE QUALITY AND PERFORMANCE OF THE SOFTWARE. SHOULD ANY SOFTWARE
++ PROVE DEFECTIVE IN ANY RESPECT, SGI ASSUMES NO COST OR LIABILITY FOR ANY
++ SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN
++ ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY SUBJECT SOFTWARE IS AUTHORIZED
++ HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
++ .
++ 10. LIMITATION OF LIABILITY. UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY,
++ WHETHER TORT (INCLUDING, WITHOUT LIMITATION, NEGLIGENCE OR STRICT LIABILITY),
++ CONTRACT, OR OTHERWISE, SHALL SGI OR ANY SGI LICENSOR BE LIABLE FOR ANY
++ DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
++ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK
++ STOPPAGE, LOSS OF DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
++ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF
++ THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY
++ TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SGI's NEGLIGENCE TO
++ THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT
++ ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
++ THAT EXCLUSION AND LIMITATION MAY NOT APPLY TO RECIPIENT.
++ .
++ 11. Indemnity. Recipient shall be solely responsible for damages arising,
++ directly or indirectly, out of its utilization of rights under this License.
++ Recipient will defend, indemnify and hold harmless Silicon Graphics, Inc. from
++ and against any loss, liability, damages, costs or expenses (including the
++ payment of reasonable attorneys fees) arising out of Recipient's use,
++ modification, reproduction and distribution of the Subject Software or out of
++ any representation or warranty made by Recipient.
++ .
++ 12. U.S. Government End Users. The Subject Software is a "commercial item"
++ consisting of "commercial computer software" as such terms are defined in
++ title 48 of the Code of Federal Regulations and all U.S. Government End Users
++ acquire only the rights set forth in this License and are subject to the terms
++ of this License.
++ .
++ 13. Miscellaneous. This License represents the complete agreement concerning
++ subject matter hereof. If any provision of this License is held to be
++ unenforceable, such provision shall be reformed so as to achieve as nearly as
++ possible the same economic effect as the original provision and the remainder
++ of this License will remain in effect.
This License shall be governed by and ++ construed in accordance with the laws of the United States and the State of ++ California as applied to agreements entered into and to be performed entirely ++ within California between California residents. Any litigation relating to ++ this License shall be subject to the exclusive jurisdiction of the Federal ++ Courts of the Northern District of California (or, absent subject matter ++ jurisdiction in such courts, the courts of the State of California), with ++ venue lying exclusively in Santa Clara County, California, with the losing ++ party responsible for costs, including without limitation, court costs and ++ reasonable attorneys fees and expenses. The application of the United Nations ++ Convention on Contracts for the International Sale of Goods is expressly ++ excluded. Any law or regulation which provides that the language of a contract ++ shall be construed against the drafter shall not apply to this License. ++ . ++ Exhibit A ++ . ++ The contents of this file are subject to Sections 2, 3, 4, 7, 8, 10, 12 and 13 ++ of the GLX Public License Version 1.0 (the "License"). You may not use this ++ file except in compliance with those sections of the License. You may obtain a ++ copy of the License at Silicon Graphics, Inc., attn: Legal Services, 2011 N. ++ Shoreline Blvd., Mountain View, CA 94043 or at ++ http://www.sgi.com/software/opensource/glx/license.html. ++ . ++ Software distributed under the License is distributed on an "AS IS" basis. ALL ++ WARRANTIES ARE DISCLAIMED, INCLUDING, WITHOUT LIMITATION, ANY IMPLIED ++ WARRANTIES OF MERCHANTABILITY, OF FITNESS FOR A PARTICULAR PURPOSE OR OF NON- ++ INFRINGEMENT. See the License for the specific language governing rights and ++ limitations under the License. ++ . ++ The Original Software is GLX version 1.2 source code, released February, 1999. ++ The developer of the Original Software is Silicon Graphics, Inc. Those ++ portions of the Subject Software created by Silicon Graphics, Inc. are ++ Copyright (c) 1991-9 Silicon Graphics, Inc. All Rights Reserved. 3.5. CID ++ Font Code Public License ++ . ++ CID FONT CODE PUBLIC LICENSE (Version 1.0 (3/31/99))("License") ++ . ++ Subject to any applicable third party claims, Silicon Graphics, Inc. ("SGI") ++ hereby grants permission to Recipient (defined below), under SGI's copyrights ++ in the Original Software (defined below), to use, copy, modify, merge, ++ publish, distribute, sublicense and/or sell copies of Subject Software ++ (defined below) in both source code and executable form, and to permit persons ++ to whom the Subject Software is furnished in accordance with this License to ++ do the same, subject to all of the following terms and conditions, which ++ Recipient accepts by engaging in any such use, copying, modifying, merging, ++ publication, distributing, sublicensing or selling: ++ . ++ 1. Definitions. ++ . ++ a. "Original Software" means source code of computer software code that is ++ described in Exhibit A as Original Software. ++ . ++ b. "Modifications" means any addition to or deletion from the substance or ++ structure of either the Original Software or any previous Modifications. ++ When Subject Software is released as a series of files, a Modification ++ means (i) any addition to or deletion from the contents of a file ++ containing Original Software or previous Modifications and (ii) any new ++ file that contains any part of the Original Code or previous ++ Modifications. ++ . ++ c. 
"Subject Software" means the Original Software or Modifications or the ++ combination of the Original Software and Modifications, or portions of any ++ of the foregoing. ++ . ++ d. "Recipient" means an individual or a legal entity exercising rights ++ under the terms of this License. For legal entities, "Recipient" includes ++ any entity that controls, is controlled by, or is under common control ++ with Recipient. For purposes of this definition, "control" of an entity ++ means (i) the power, direct or indirect, to direct or manage such entity, ++ or (ii) ownership of fifty percent (50%) or more of the outstanding shares ++ or beneficial ownership of such entity. ++ . ++ e. "Required Notice" means the notice set forth in Exhibit A to this ++ License. ++ . ++ f. "Accompanying Technology" means any software or other technology that ++ is not a Modification and that is distributed or made publicly available ++ by Recipient with the Subject Software. Separate software files that do ++ not contain any Original Software or any previous Modification shall not ++ be deemed a Modification, even if such software files are aggregated as ++ part of a product, or in any medium of storage, with any file that does ++ contain Original Software or any previous Modification. ++ . ++ 2. License Terms. All distribution of the Subject Software must be made ++ subject to the terms of this License. A copy of this License and the Required ++ Notice must be included in any documentation for Subject Software where ++ Recipient's rights relating to Subject Software and/or any Accompanying ++ Technology are described. Distributions of Subject Software in source code ++ form must also include the Required Notice in every file distributed. In ++ addition, a ReadMe file entitled "Important Legal Notice" must be distributed ++ with each distribution of one or more files that incorporate Subject Software. ++ That file must be included with distributions made in both source code and ++ executable form. A copy of the License and the Required Notice must be ++ included in that file. Recipient may distribute Accompanying Technology under ++ a license of Recipient's choice, which may contain terms different from this ++ License, provided that (i) Recipient is in compliance with the terms of this ++ License, (ii) such other license terms do not modify or supersede the terms of ++ this License as applicable to the Subject Software, (iii) Recipient hereby ++ indemnifies SGI for any liability incurred by SGI as a result of the ++ distribution of Accompanying Technology or the use of other license terms. ++ . ++ 3. Termination. This License and the rights granted hereunder will terminate ++ automatically if Recipient fails to comply with terms herein and fails to cure ++ such breach within 30 days of the breach. Any sublicense to the Subject ++ Software that is properly granted shall survive any termination of this ++ License absent termination by the terms of such sublicense. Provisions which, ++ by their nature, must remain in effect beyond the termination of this License ++ shall survive. ++ . ++ 4. Trademark Rights. This License does not grant any rights to use any trade ++ name, trademark or service mark whatsoever. No trade name, trademark or ++ service mark of SGI may be used to endorse or promote products derived from or ++ incorporating any Subject Software without prior written permission of SGI. ++ . ++ 5. No Other Rights. 
No rights or licenses not expressly granted hereunder ++ shall arise by implication, estoppel or otherwise. Title to and ownership of ++ the Original Software at all times remains with SGI. All rights in the ++ Original Software not expressly granted under this License are reserved. ++ . ++ 6. Compliance with Laws; Non-Infringement. Recipient shall comply with all ++ applicable laws and regulations in connection with use and distribution of the ++ Subject Software, including but not limited to, all export and import control ++ laws and regulations of the U.S. government and other countries. Recipient may ++ not distribute Subject Software that (i) in any way infringes (directly or ++ contributorily) the rights (including patent, copyright, trade secret, ++ trademark or other intellectual property rights of any kind) of any other ++ person or entity, or (ii) breaches any representation or warranty, express, ++ implied or statutory, which under any applicable law it might be deemed to ++ have been distributed. ++ . ++ 7. Claims of Infringement. If Recipient at any time has knowledge of any one ++ or more third party claims that reproduction, modification, use, distribution, ++ import or sale of Subject Software (including particular functionality or code ++ incorporated in Subject Software) infringes the third party's intellectual ++ property rights, Recipient must place in a well-identified web page bearing ++ the title "LEGAL" a description of each such claim and a description of the ++ party making each such claim in sufficient detail that a user of the Subject ++ Software will know whom to contact regarding the claim. Also, upon gaining ++ such knowledge of any such claim, Recipient must conspicuously include the URL ++ for such web page in the Required Notice, and in the text of any related ++ documentation, license agreement or collateral in which Recipient describes ++ end user's rights relating to the Subject Software. If Recipient obtains such ++ knowledge after it makes Subject Software available to any other person or ++ entity, Recipient shall take other steps (such as notifying appropriate ++ mailing lists or newsgroups) reasonably calculated to provide such knowledge ++ to those who received the Subject Software. ++ . ++ 8. DISCLAIMER OF WARRANTY. SUBJECT SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, ++ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT ++ LIMITATION, WARRANTIES THAT THE SUBJECT SOFTWARE IS FREE OF DEFECTS, ++ MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. SGI ASSUMES NO ++ RISK AS TO THE QUALITY AND PERFORMANCE OF THE SOFTWARE. SHOULD ANY SOFTWARE ++ PROVE DEFECTIVE IN ANY RESPECT, SGI ASSUMES NO COST OR LIABILITY FOR ANY ++ SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ++ ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY SUBJECT SOFTWARE IS AUTHORIZED ++ HEREUNDER EXCEPT UNDER THIS DISCLAIMER. ++ . ++ 9. LIMITATION OF LIABILITY. UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, ++ WHETHER TORT (INCLUDING, WITHOUT LIMITATION, NEGLIGENCE OR STRICT LIABILITY), ++ CONTRACT, OR OTHERWISE, SHALL SGI OR ANY SGI LICENSOR BE LIABLE FOR ANY CLAIM, ++ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SUBJECT SOFTWARE OR ++ THE USE OR OTHER DEALINGS IN THE SUBJECT SOFTWARE. 
SOME JURISDICTIONS DO NOT
++ ALLOW THE EXCLUSION OR LIMITATION OF CERTAIN DAMAGES, SO THIS EXCLUSION AND
++ LIMITATION MAY NOT APPLY TO RECIPIENT TO THE EXTENT SO DISALLOWED.
++ .
++ 10. Indemnity. Recipient shall be solely responsible for damages arising,
++ directly or indirectly, out of its utilization of rights under this License.
++ Recipient will defend, indemnify and hold SGI and its successors and assigns
++ harmless from and against any loss, liability, damages, costs or expenses
++ (including the payment of reasonable attorneys fees) arising out of
++ Recipient's use, modification, reproduction and distribution of the Subject
++ Software or out of any representation or warranty made by Recipient.
++ .
++ 11. U.S. Government End Users. The Subject Software is a "commercial item"
++ consisting of "commercial computer software" as such terms are defined in
++ title 48 of the Code of Federal Regulations and all U.S. Government End Users
++ acquire only the rights set forth in this License and are subject to the terms
++ of this License.
++ .
++ 12. Miscellaneous. This License represents the complete agreement concerning
++ subject matter hereof. If any provision of this License is held to be
++ unenforceable by any judicial or administrative authority having proper
++ jurisdiction with respect thereto, such provision shall be reformed so as to
++ achieve as nearly as possible the same economic effect as the original
++ provision and the remainder of this License will remain in effect. This
++ License shall be governed by and construed in accordance with the laws of the
++ United States and the State of California as applied to agreements entered
++ into and to be performed entirely within California between California
++ residents. Any litigation relating to this License shall be subject to the
++ exclusive jurisdiction of the Federal Courts of the Northern District of
++ California (or, absent subject matter jurisdiction in such courts, the courts
++ of the State of California), with venue lying exclusively in Santa Clara
++ County, California, with the losing party responsible for costs, including
++ without limitation, court costs and reasonable attorneys fees and expenses.
++ The application of the United Nations Convention on Contracts for the
++ International Sale of Goods is expressly excluded. Any law or regulation that
++ provides that the language of a contract shall be construed against the
++ drafter shall not apply to this License.
++ .
++ Exhibit A
++ .
++ Copyright (c) 1994-1999 Silicon Graphics, Inc.
++ .
++ The contents of this file are subject to the CID Font Code Public License
++ Version 1.0 (the "License"). You may not use this file except in compliance
++ with the License. You may obtain a copy of the License at Silicon Graphics,
++ Inc., attn: Legal Services, 2011 N. Shoreline Blvd., Mountain View, CA 94043
++ or at http://www.sgi.com/software/opensource/cid/license.html
++ .
++ Software distributed under the License is distributed on an "AS IS" basis. ALL
++ WARRANTIES ARE DISCLAIMED, INCLUDING, WITHOUT LIMITATION, ANY IMPLIED
++ WARRANTIES OF MERCHANTABILITY, OF FITNESS FOR A PARTICULAR PURPOSE OR OF
++ NON-INFRINGEMENT. See the License for the specific language governing rights
++ and limitations under the License.
++ .
++ The Original Software (as defined in the License) is CID font code that was
++ developed by Silicon Graphics, Inc. Those portions of the Subject Software (as
++ defined in the License) that were created by Silicon Graphics, Inc. are
++ Copyright (c) 1994-1999 Silicon Graphics, Inc. All Rights Reserved.
++ .
++ [NOTE: When using this text in connection with Subject Software delivered
++ solely in object code form, Recipient may replace the words "this file" with
++ "this software" in both the first and second sentences.] 3.6. Bitstream Vera
++ Fonts Copyright
++ .
++ The fonts have a generous copyright, allowing derivative works (as long as
++ "Bitstream" or "Vera" are not in the names), and full redistribution (so long
++ as they are not *sold* by themselves). They can be bundled, redistributed
++ and sold with any software.
++ .
++ The fonts are distributed under the following copyright:
++ .
++ Copyright (c) 2003 by Bitstream, Inc. All Rights Reserved. Bitstream Vera is a
++ trademark of Bitstream, Inc.
++ .
++ Permission is hereby granted, free of charge, to any person obtaining a copy
++ of the fonts accompanying this license ("Fonts") and associated documentation
++ files (the "Font Software"), to reproduce and distribute the Font Software,
++ including without limitation the rights to use, copy, merge, publish,
++ distribute, and/or sell copies of the Font Software, and to permit persons to
++ whom the Font Software is furnished to do so, subject to the following
++ conditions:
++ .
++ The above copyright and trademark notices and this permission notice shall be
++ included in all copies of one or more of the Font Software typefaces.
++ .
++ The Font Software may be modified, altered, or added to, and in particular the
++ designs of glyphs or characters in the Fonts may be modified and additional
++ glyphs or characters may be added to the Fonts, only if the fonts are renamed
++ to names not containing either the words "Bitstream" or the word "Vera".
++ .
++ This License becomes null and void to the extent applicable to Fonts or Font
++ Software that has been modified and is distributed under the "Bitstream Vera"
++ names.
++ .
++ The Font Software may be sold as part of a larger software package but no copy
++ of one or more of the Font Software typefaces may be sold by itself.
++ .
++ THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY,
++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT,
++ TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL BITSTREAM OR THE GNOME FOUNDATION
++ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL,
++ SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION
++ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO
++ USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.
++ .
++ Except as contained in this notice, the names of Gnome, the Gnome Foundation,
++ and Bitstream Inc., shall not be used in advertising or otherwise to promote
++ the sale, use or other dealings in this Font Software without prior written
++ authorization from the Gnome Foundation or Bitstream Inc., respectively. For
++ further information, contact: fonts at gnome dot org. 3.7. Bigelow & Holmes
++ Inc and URW++ GmbH Luxi font license
++ .
++ Luxi fonts copyright (c) 2001 by Bigelow & Holmes Inc. Luxi font instruction
++ code copyright (c) 2001 by URW++ GmbH. All Rights Reserved. Luxi is a
++ registered trademark of Bigelow & Holmes Inc.
++ .
++ Permission is hereby granted, free of charge, to any person obtaining a copy ++ of these Fonts and associated documentation files (the "Font Software"), to ++ deal in the Font Software, including without limitation the rights to use, ++ copy, merge, publish, distribute, sublicense, and/or sell copies of the Font ++ Software, and to permit persons to whom the Font Software is furnished to do ++ so, subject to the following conditions: ++ . ++ The above copyright and trademark notices and this permission notice shall be ++ included in all copies of one or more of the Font Software. ++ . ++ The Font Software may not be modified, altered, or added to, and in particular ++ the designs of glyphs or characters in the Fonts may not be modified nor may ++ additional glyphs or characters be added to the Fonts. This License becomes ++ null and void when the Fonts or Font Software have been modified. ++ . ++ THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, ++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, ++ TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL BIGELOW & HOLMES INC. OR URW++ ++ GMBH. BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY ++ GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN ++ AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR ++ INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT ++ SOFTWARE. ++ . ++ Except as contained in this notice, the names of Bigelow & Holmes Inc. and ++ URW++ GmbH. shall not be used in advertising or otherwise to promote the sale, ++ use or other dealings in this Font Software without prior written ++ authorization from Bigelow & Holmes Inc. and URW++ GmbH. ++ . ++ For further information, contact: ++ . ++ info@urwpp.de or design@bigelowandholmes.com ++ . ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to zlib v1.2.5, which may be included ++ with JRE 8, JDK 8, and OpenJDK 8. ++ . ++ --- begin of LICENSE --- ++ . ++ version 1.2.5, July 18th, 2005 ++ . ++ Copyright (C) 1995-2005 Jean-loup Gailly and Mark Adler ++ . ++ This software is provided 'as-is', without any express or implied ++ warranty. In no event will the authors be held liable for any damages ++ arising from the use of this software. ++ . ++ Permission is granted to anyone to use this software for any purpose, ++ including commercial applications, and to alter it and redistribute it ++ freely, subject to the following restrictions: ++ . ++ 1. The origin of this software must not be misrepresented; you must not ++ claim that you wrote the original software. If you use this software ++ in a product, an acknowledgment in the product documentation would be ++ appreciated but is not required. ++ 2. Altered source versions must be plainly marked as such, and must not be ++ misrepresented as being the original software. ++ 3. This notice may not be removed or altered from any source distribution. ++ . ++ Jean-loup Gailly Mark Adler ++ jloup@gzip.org madler@alumni.caltech.edu ++ . ++ --- end of LICENSE --- ++ . ++ ------------------------------------------------------------------------------ ++ . ++ %% This notice is provided with respect to the following which may be ++ included with JRE 8, JDK 8, and OpenJDK 8, except where noted: ++ . 
++ Apache Commons Math 2.2
++ Apache Derby 10.10.1.2 [included with JDK 8]
++ Apache Jakarta BCEL 5.2
++ Apache Jakarta Regexp 1.4
++ Apache Santuario XML Security for Java 1.5.4
++ Apache Xalan-Java 2.7.1
++ Apache Xerces Java 2.10.0
++ Apache XML Resolver 1.1
++ Dynalink 0.5
++ .
++ These components are licensed under the Apache License, Version 2.0.
++ See /usr/share/common-licenses/Apache-2.0
++ .
++ ------------------------------------------------------------------------------
++ .
++ .
++ ==============================================================================
++
++Files: debian/*
++Copyright: Copyright © 2007-2023 Canonical Ltd.
++License: MIT
++ Permission is hereby granted, free of charge, to any person obtaining
++ a copy of this software and associated documentation files (the
++ "Software"), to deal in the Software without restriction, including
++ without limitation the rights to use, copy, modify, merge, publish,
++ distribute, sublicense, and/or sell copies of the Software, and to
++ permit persons to whom the Software is furnished to do so, subject to
++ the following conditions:
++ .
++ The above copyright notice and this permission notice shall be
++ included in all copies or substantial portions of the Software.
++ .
++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
++ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++
++Files: debian/dbg.py
++Copyright: Copyright 2016, Red Hat and individual contributors
++License: LGPL
++ # Copyright 2016, Red Hat and individual contributors
++ # by the @authors tag.
++ #
++ # This is free software; you can redistribute it and/or modify it
++ # under the terms of the GNU Lesser General Public License as
++ # published by the Free Software Foundation; either version 2.1 of
++ # the License, or (at your option) any later version.
++ #
++ # This software is distributed in the hope that it will be useful,
++ # but WITHOUT ANY WARRANTY; without even the implied warranty of
++ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ # Lesser General Public License for more details.
++ .
++ On Debian systems a full copy of the GNU Lesser General Public License,
++ LGPL, can be found in the file /usr/share/common-licenses/LGPL-2.1.
diff --cc debian/dbg.py
index 000000000,000000000..47db8589f
new file mode 100644
--- /dev/null
+++ b/debian/dbg.py
@@@ -1,0 -1,0 +1,1232 @@@
++# Copyright 2016, Red Hat and individual contributors
++# by the @authors tag.
++#
++# This is free software; you can redistribute it and/or modify it
++# under the terms of the GNU Lesser General Public License as
++# published by the Free Software Foundation; either version 2.1 of
++# the License, or (at your option) any later version.
++#
++# This software is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++# Lesser General Public License for more details.
++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with this software; if not, write to the Free ++# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++# 02110-1301 USA, or see the FSF site: http://www.fsf.org. ++# ++# @authors Andrew Dinn ++ ++import gdb ++ ++import re ++from gdb.FrameDecorator import FrameDecorator ++ ++# we rely on the unwinder API provided by gdb.7.10 ++ ++_dump_frame = False ++_have_unwinder = True ++try: ++ from gdb.unwinder import Unwinder ++ gdb.write("Installing openjdk unwinder\n") ++except ImportError: ++ _have_unwinder = False ++ # We need something here; it doesn't matter what as no unwinder ++ # will ever be instantiated. ++ Unwinder = object ++ ++ ++def debug_write(msg): ++ gdb.write(msg) ++ # pass ++ ++def t(msg): ++ gdb.write("%s\n" % msg) ++ # pass ++ ++# debug_write("dbg.py\n") ++ ++# class providing various type conversions for gdb Value instances ++ ++class Types(object): ++ # cache some basic primitive and pointer types ++ byte_t = gdb.lookup_type('unsigned char') ++ char_t = gdb.lookup_type('char') ++ int_t = gdb.lookup_type('int') ++ long_t = gdb.lookup_type('long') ++ void_t = gdb.lookup_type('void') ++ ++ bytep_t = byte_t.pointer() ++ charp_t = char_t.pointer() ++ intp_t = int_t.pointer() ++ longp_t = long_t.pointer() ++ voidp_t = void_t.pointer() ++ ++ codeblobp_t = gdb.lookup_type('CodeBlob').pointer() ++ cmethodp_t = gdb.lookup_type('CompiledMethod').pointer() ++ nmethodp_t = gdb.lookup_type('nmethod').pointer() ++ ++ ptrp_t = voidp_t.pointer() ++ ++ # convert Values to primitive Values ++ @classmethod ++ def cast_byte(cls, value): ++ return value.cast(cls.byte_t) ++ ++ @classmethod ++ def cast_int(cls, value): ++ return value.cast(cls.int_t) ++ ++ @classmethod ++ def cast_long(cls, value): ++ return value.cast(cls.long_t) ++ ++ # convert Values to pointer Values ++ @classmethod ++ def cast_bytep(cls, value): ++ return value.cast(cls.bytep_t) ++ ++ @classmethod ++ def cast_intp(cls, value): ++ return value.cast(cls.intp_t) ++ ++ @classmethod ++ def cast_longp(cls, value): ++ return value.cast(cls.longp_t) ++ ++ @classmethod ++ def cast_voidp(cls, value): ++ return value.cast(cls.voidp_t) ++ ++ @classmethod ++ def cast_ptrp(cls, value): ++ return value.cast(cls.ptrp_t) ++ ++ # cast Value to pointer type then load and return contents as Value ++ @classmethod ++ def load_ptr(cls, value): ++ return cls.cast_ptrp(cls.cast_ptrp(value).dereference()) ++ ++ @classmethod ++ def load_byte(cls, value): ++ return cls.cast_bytep(value).dereference() ++ ++ @classmethod ++ def load_int(cls, value): ++ return cls.cast_intp(value).dereference() ++ ++ @classmethod ++ def load_long(cls, value): ++ return cls.cast_longp(value).dereference() ++ ++ # cast Value to int and return as python integer ++ @classmethod ++ def as_int(cls, value): ++ return int(cls.cast_int(value)) ++ ++ # cast Value to long and return as python integer ++ @classmethod ++ def as_long(cls, value): ++ return int(cls.cast_long(value)) ++ ++ # construct Value from integer x and cast to type t ++ @classmethod ++ def to_type(cls, x, t): ++ return gdb.Value(x).cast(t) ++ ++ # construct voidp Value from integer x ++ @classmethod ++ def to_voidp(cls, x): ++ return cls.to_type(x, cls.voidp_t) ++ ++ # construct void ** Value from integer x ++ @classmethod ++ def to_ptrp(cls, x): ++ return cls.to_type(x, cls.ptrp_t) ++ ++# OpenJDK specific classes which understand the layout of the code ++# heap and know how to translate a PC to an associated 
code blob and,
++# from there to a method object. n.b. in some cases the latter step
++# requires a frame base pointer but that can be calculated using the
++# code blob and stack pointer
++
++# class encapsulating details of a specific heap
++
++class CodeHeap:
++
++    # track whether we have static initialized yet
++    class_inited = False
++    heap_block_type = None
++    code_blob_type = None
++
++    @classmethod
++    def class_init(cls):
++        # t("CodeHeap.class_init")
++        if cls.class_inited:
++            return
++        # we can only proceed if we have the necessary heap symbols
++        # if this excepts then we don't care
++        cls.heap_block_type = gdb.lookup_type("HeapBlock").pointer()
++        cls.code_blob_type = gdb.lookup_type("CodeBlob").pointer()
++        cls.class_inited = True
++
++    def __init__(self, heap):
++        # t("CodeHeap.__init__")
++        # make sure we have static inited successfully
++        self.class_init()
++        # if we got here we are ok to create a new instance
++        self.heap = heap
++        self.name = str(heap['_name'])
++        self.lo = Types.as_long(heap['_memory']['_low_boundary'])
++        self.hi = Types.as_long(heap['_memory']['_high_boundary'])
++        self.segmap = heap['_segmap']
++        self.segmap_lo = self.segmap['_low']
++        self.segment_size = int(heap['_segment_size'])
++        self.log2_segment_size = int(heap['_log2_segment_size'])
++        # debug_write("@@ heap.name = %s\n" % self.name)
++        # debug_write("@@ heap.lo = 0x%x\n" % self.lo)
++        # debug_write("@@ heap.hi = 0x%x\n" % self.hi)
++
++    def inrange(self, x):
++        # t("CodeHeap.inrange")
++        return self.lo <= x and self.hi > x
++    def findblob(self, pc):
++        # t("CodeHeap.findblob")
++        x = Types.as_long(pc)
++        # debug_write("@@ findblob(%s, 0x%x)\n" % (self.name, pc))
++        # debug_write("@@ pc (%s) = 0x%x \n" % (str(pc.type), pc))
++        # debug_write("@@ self.lo = 0x%x\n" % self.lo)
++        # debug_write("@@ self.hi = 0x%x\n" % self.hi)
++        # check pc is in this heap's range
++        # t("if not self.inrange(x):")
++        if not self.inrange(x):
++            return None
++        # debug_write("@@ pc in range\n")
++        # t("segments = 0")
++        segments = 0
++        # debug_write("@@ segmap_lo (%s) = 0x%x\n" % (str(self.segmap_lo.type), self.segmap_lo))
++        # debug_write("@@ self.lo = 0x%x \n" % self.lo)
++        # debug_write("@@ self.log2_segment_size = 0x%x \n" % self.log2_segment_size)
++        # t("offset = Types.as_long(pc - self.lo)")
++        offset = Types.as_long(pc - self.lo)
++        # debug_write("@@ offset = 0x%x\n" % offset)
++        # t("shift = self.log2_segment_size")
++        shift = self.log2_segment_size
++        # debug_write("@@ shift = 0x%x\n" % shift)
++        # t("segment = offset >> shift")
++        segment = offset >> shift
++        # segment = (offset >> self.log2_segment_size)
++        #segment = offset >> (self.log2_segment_size & 0x31)
++        # debug_write("@@ segment = 0x%x\n" % segment)
++        # t("tag = (self.segmap_lo + segment).dereference() & 0xff")
++        tag = (self.segmap_lo + segment).dereference() & 0xff
++        # tag = Types.load_byte(self.segmap_lo + segment) & 0xff
++        # debug_write("@@ tag (%s) = 0x%x\n" % (str(tag.type), tag))
++        # t("while tag > 0 and segments < 64:")
++        while tag > 0 and segments < 64:
++            # t("segment = segment - tag")
++            segment = segment - tag
++            # debug_write("@@ segment = 0x%x\n" % segment)
++            # t("tag = (self.segmap_lo + segment).dereference() & 0xff")
++            tag = (self.segmap_lo + segment).dereference() & 0xff
++            # debug_write("@@ tag (%s) = 0x%x\n" % (str(tag.type), tag))
++            # t("segments += 1")
++            segments += 1
++        # t("if tag != 0:")
++        if tag != 0:
++            # t("return None")
++            return None
++        # debug_write("@@ lo = 0x%x\n" % self.lo)
++        # debug_write("@@ segment << self.log2_segment_size = 0x%x\n" % (segment << self.log2_segment_size))
++        # t("block_addr = self.lo + (segment << self.log2_segment_size)")
++        block_addr = self.lo + (segment << self.log2_segment_size)
++        # debug_write("@@ block_addr (%s) = 0x%x\n" % (str(block_addr.type), block_addr))
++        # t("heap_block = gdb.Value(block_addr).cast(CodeHeap.heap_block_type)")
++        heap_block = gdb.Value(block_addr).cast(CodeHeap.heap_block_type)
++        # debug_write("@@ heap_block (%s) = 0x%x\n" % (str(heap_block.type), heap_block))
++        # t("if heap_block['_header']['_used'] != 1:")
++        if heap_block['_header']['_used'] != 1:
++            # hmm, this is not meant to happen
++            # t("return None")
++            return None
++        # t("blob = (heap_block + 1).cast(CodeHeap.code_blob_type)")
++        blob = (heap_block + 1).cast(CodeHeap.code_blob_type)
++        return blob
++
++# class encapsulating access to code cache memory
++# this is essentially all static as per the JVM class
++
++class CodeCache:
++    # track whether we have successfully initialized
++    class_inited = False
++    # static heap start, lo and hi bounds for code addresses
++    lo = 0
++    hi = 0
++    heap_count = 0
++    heap_list = []
++
++    @classmethod
++    def class_init(cls):
++        # t("CodeCache.class_init")
++        if cls.class_inited:
++            return
++        # t("if cls.lo == 0 or cls.hi == 0:")
++        if cls.lo == 0 or cls.hi == 0:
++            try:
++                # t("cls.lo = gdb.parse_and_eval(\"CodeCache::_low_bound\")")
++                lo = gdb.parse_and_eval("CodeCache::_low_bound")
++                cls.lo = Types.as_long(lo)
++                # debug_write("@@ CodeCache::_low_bound = 0x%x\n" % cls.lo)
++                if cls.lo == 0:
++                    return
++                # t("cls.hi = gdb.parse_and_eval(\"CodeCache::_high_bound\")")
++                hi = gdb.parse_and_eval("CodeCache::_high_bound")
++                cls.hi = Types.as_long(hi)
++                # debug_write("@@ CodeCache::_high_bound = 0x%x\n" % cls.hi)
++                if cls.hi == 0:
++                    return
++            except Exception as arg:
++                # debug_write("@@ %s\n" % arg)
++                cls.lo = 0
++                cls.hi = 0
++                cls.class_inited = False
++                raise
++
++        # t("if cls.heap_list == []:")
++        if cls.heap_list == []:
++            try:
++                # t("heaps = gdb.parse_and_eval(\"CodeCache::_heaps\")")
++                heaps = gdb.parse_and_eval("CodeCache::_heaps")
++                # debug_write("@@ CodeCache::_heaps (%s) = 0x%x\n" % (heaps.type, heaps))
++                # t("len = int(heaps['_len'])")
++                len = int(heaps['_len'])
++                # debug_write("@@ CodeCache::_heaps->_len = %d\n" % len)
++                # t("data = heaps['_data']")
++                data = heaps['_data']
++                # debug_write("@@ CodeCache::_heaps->_data = 0x%x\n" % data)
++                # t("for i in range(0, len):")
++                for i in range(0, len):
++                    # t("heap = CodeHeap((data + i).dereference())")
++                    heap = CodeHeap((data + i).dereference())
++                    # t("cls.heap_list.append(heap)")
++                    cls.heap_list.append(heap)
++                    # t("cls.heap_count += 1")
++                    cls.heap_count += 1
++            except Exception as arg:
++                # debug_write("@@ %s\n" % arg)
++                cls.heap_list = []
++                cls.heap_count = 0
++                cls.class_inited = False
++                raise
++        cls.class_inited = True
++
++    @classmethod
++    def inrange(cls, pc):
++        # t("CodeCache.inrange")
++        # make sure we are initialized
++        cls.class_init()
++        # if we got here we can use the heaps
++        x = Types.as_long(pc)
++        # t("return cls.lo <= x and cls.hi > x")
++        return cls.lo <= x and cls.hi > x
++
++    @classmethod
++    def makestr(cls, charcnt, charptr):
++        # t("CodeCache.makestr")
++        #res = ""
++        #for i in range(0, charcnt):
++        #    c = (charptr + i).dereference()
++        #    res = ("%s%c" % (res, c))
++        #return res
++        # debug_write("charcnt = %d charptr = %s\n" % (charcnt, str(charptr)))
++        return charptr.string("ascii", "ignore", charcnt)
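++
++    # Illustrative sketch only (added commentary, not used by the unwinder):
++    # the backward segment-map walk that CodeHeap.findblob performs above,
++    # redone over a plain Python list so it can be tried outside gdb. The
++    # map contents and segment size here are invented for the example; in
++    # the JVM the map is a heap's _segmap byte array, and a zero tag marks
++    # the first segment of a heap block.
++    @staticmethod
++    def demo_segmap_walk(segmap, pc_offset, log2_segment_size):
++        # locate the segment containing pc_offset
++        segment = pc_offset >> log2_segment_size
++        # each non-zero tag says how far to step back towards the start
++        # of the enclosing block; a zero tag marks the block start
++        steps = 0
++        tag = segmap[segment]
++        while tag > 0 and steps < 64:
++            segment = segment - tag
++            tag = segmap[segment]
++            steps += 1
++        if tag != 0:
++            # walked too far back without finding a block start
++            return None
++        # byte offset of the heap block containing pc_offset
++        return segment << log2_segment_size
++    # e.g. CodeCache.demo_segmap_walk([0, 1, 2, 0, 1], 0x250, 7) returns
++    # 0x180: offset 0x250 lies in segment 4 and the tags lead back to the
++    # block that starts at segment 3.
++
++    # given a PC find the associated 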
OpenJDK code blob instance ++ @classmethod ++ def findblob(cls, pc): ++ # t("CodeCache.findblob") ++ # make sure we are initialized ++ cls.class_init() ++ # if we got here we can use the heaps ++ if not cls.inrange(pc): ++ raise gdb.GdbError("dbg.findblob : address 0x%x is not in range!" % pc) ++ for heap in cls.heap_list: ++ try: ++ # t("blob = heap.findblob(pc)") ++ blob = heap.findblob(pc) ++ except Exception as arg: ++ # debug_write("@@ findblob excepted %s\n" % str(arg)) ++ # t("blob = None") ++ blob = None ++ # t("if blob != None:") ++ if blob != None: ++ # t("name=str(blob['_name'])") ++ name=str(blob['_name']) ++ # debug_write("@@ blob(0x%x) -> %s\n" % (pc, name)) ++ # t("return blob") ++ return blob ++ # t("raise gdb.GdbError") ++ raise gdb.GdbError("dbg.findblob : no blob for inrange address 0x%x!" % pc) ++ ++# abstract over some constants for stack frame layout ++ ++class FrameConstants(object): ++ class_inited = False ++ _sender_sp_offset = 0 ++ _interpreter_frame_sender_sp_offset = 0 ++ _interpreter_frame_method_offset = 0 ++ _interpreter_frame_bcp_offset = 0 ++ @classmethod ++ def class_init(cls): ++ if cls.class_inited: ++ return ++ cls._interpreter_frame_sender_sp_offset = int(gdb.parse_and_eval("frame::interpreter_frame_sender_sp_offset")) ++ cls._sender_sp_offset = int(gdb.parse_and_eval("frame::sender_sp_offset")) ++ cls._interpreter_frame_method_offset = int(gdb.parse_and_eval("frame::interpreter_frame_method_offset")) ++ cls._interpreter_frame_bcp_offset = int(gdb.parse_and_eval("frame::interpreter_frame_bcp_offset")) ++ # only set if we got here with no errors ++ cls.class_inited = True ++ @classmethod ++ def sender_sp_offset(cls): ++ cls.class_init() ++ return cls._sender_sp_offset ++ @classmethod ++ def interpreter_frame_sender_sp_offset(cls): ++ cls.class_init() ++ return cls._interpreter_frame_sender_sp_offset ++ @classmethod ++ def interpreter_frame_method_offset(cls): ++ cls.class_init() ++ return cls._interpreter_frame_method_offset ++ @classmethod ++ def interpreter_frame_bcp_offset(cls): ++ cls.class_init() ++ return cls._interpreter_frame_bcp_offset ++ ++# class which knows how to read a compressed stream of bytes n.b. 
at
++# the moment we only need to know how to read bytes and ints
++
++class CompressedStream:
++    # important constants for compressed stream read
++    BitsPerByte = 8
++    lg_H = 6
++    H = 1 << lg_H
++    L = (1 << BitsPerByte) - H
++    MAX_i = 4
++
++    def __init__(self, data):
++        # data is a gdb.Value of type 'byte *'
++        self.data = data
++        self.pos = 0
++    # retrieve the byte at offset pos
++    def at(self, pos):
++        return int(Types.load_byte(self.data + pos))
++    # read and return the next byte
++    def read(self):
++        # t("CompressedStream.read()")
++        pos = self.pos
++        b = self.at(pos)
++        self.pos = pos+1
++        return b
++    def read_int(self):
++        # t("CompressedStream.read_int()")
++        b0 = self.read()
++        # debug_write("b0 = 0x%x\n" % b0)
++        if b0 < CompressedStream.L:
++            return b0
++        return self.read_int_mb(b0)
++    def read_signed_int(self):
++        # t("CompressedStream.read_signed_int()")
++        return self.decode_sign(self.read_int())
++    def decode_sign(self, x):
++        # t("CompressedStream.decode_sign()")
++        return (x >> 1) ^ (0 - (x & 1))
++    def read_int_mb(self, b):
++        # t("CompressedStream.read_int_mb()")
++        sum = b
++        pos = self.pos
++        # debug_write("pos = %d\n" % pos)
++        # debug_write("sum = 0x%x\n" % sum)
++        lg_H_i = CompressedStream.lg_H
++        i = 0
++        while (True):
++            b_i = self.at(pos + i)
++            # debug_write("b_%d = %d\n" % (i, b_i))
++            sum += (b_i << lg_H_i)
++            # debug_write("sum = 0x%x\n" % sum)
++            i += 1
++            if b_i < CompressedStream.L or i == CompressedStream.MAX_i:
++                self.pos = pos + i
++                # debug_write("self.pos = %d\n" % self.pos)
++                return sum
++            lg_H_i += CompressedStream.lg_H
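++
++
++# Illustrative sketch only (added commentary, not part of the original
++# script): the variable-length integer decoding that CompressedStream
++# implements above, redone over a plain Python list of byte values so the
++# format can be poked at outside gdb. The example byte sequences are
++# invented. Values below L (192) fit in one byte; larger values carry six
++# more bits in each extra byte, up to MAX_i extra bytes.
++def demo_decompress_int(data, pos=0):
++    # returns (value, next_pos) for the unsigned int encoded at pos
++    b0 = data[pos]
++    pos += 1
++    if b0 < CompressedStream.L:
++        return b0, pos
++    value = b0
++    shift = CompressedStream.lg_H
++    for _ in range(CompressedStream.MAX_i):
++        b_i = data[pos]
++        pos += 1
++        value += b_i << shift
++        if b_i < CompressedStream.L:
++            break
++        shift += CompressedStream.lg_H
++    return value, pos
++
++# e.g. demo_decompress_int([5]) returns (5, 1) for a small value, while
++# demo_decompress_int([200, 3]) returns (392, 2) since 200 + (3 << 6) = 392.
++# A signed value is recovered from the unsigned one exactly as
++# CompressedStream.decode_sign does: (x >> 1) ^ (0 - (x & 1)).
++
++
++# class which knows how to find method and bytecode
++# index pairs from a pc associated with a given compiled
++# method. n.b. there may be more than one such pair
++# because of inlining.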
++ ++class MethodBCIReader: ++ pcdescstar_t = None ++ class_inited = False ++ @classmethod ++ def class_init(cls): ++ # t("MethodBCIReader.class_init") ++ if cls.class_inited: ++ return ++ # cache some useful types ++ cls.pcdesc_p = gdb.lookup_type("PcDesc").pointer() ++ cls.metadata_p = gdb.lookup_type("Metadata").pointer() ++ cls.metadata_pp = cls.metadata_p.pointer() ++ cls.method_p = gdb.lookup_type("Method").pointer() ++ cls.class_inited = True ++ ++ @classmethod ++ def as_pcdesc_p(cls, val): ++ return Types.to_type(val, cls.pcdesc_p) ++ ++ def __init__(self, nmethod, method): ++ # t("MethodBCIReader.__init__") ++ # ensure we have cached the necessary types ++ self.class_init() ++ # need to unpack pc scopes ++ self.nmethod = nmethod ++ self.method = method ++ # debug_write("nmethod (%s) = 0x%x\n" % (str(nmethod.type), Types.as_long(nmethod))) ++ blob = Types.to_type(nmethod, Types.codeblobp_t); ++ self.code_begin = Types.as_long(blob['_code_begin']) ++ self.code_end = Types.as_long(blob['_code_end']) ++ scopes_pcs_begin_offset = Types.as_int(nmethod['_scopes_pcs_offset']) ++ # debug_write("scopes_pcs_begin_offset = 0x%x\n" % scopes_pcs_begin_offset) ++ scopes_pcs_end_offset = Types.as_int(nmethod['_dependencies_offset']) ++ # debug_write("scopes_pcs_end_offset = 0x%x\n" % scopes_pcs_end_offset) ++ header_begin = Types.cast_bytep(nmethod) ++ self.scopes_pcs_begin = self.as_pcdesc_p(header_begin + scopes_pcs_begin_offset) ++ # debug_write("scopes_pcs_begin (%s) = 0x%x\n" % (str(self.scopes_pcs_begin.type), Types.as_long(self.scopes_pcs_begin))) ++ self.scopes_pcs_end = self.as_pcdesc_p(header_begin + scopes_pcs_end_offset) ++ # debug_write("scopes_pcs_end (%s) = 0x%x\n" % (str(self.scopes_pcs_end.type), Types.as_long(self.scopes_pcs_end))) ++ ++ def find_pc_desc(self, pc_off): ++ lower = self.scopes_pcs_begin ++ upper = self.scopes_pcs_end - 1 ++ if lower == upper: ++ return None ++ # non-empty table always starts with lower as a sentinel with ++ # offset -1 and will have at least one real offset beyond that ++ next = lower + 1 ++ while next < upper and next['_pc_offset'] <= pc_off: ++ next = next + 1 ++ # use the last known bci below this pc ++ return next - 1 ++ ++ def pc_desc_to_method_bci_stack(self, pc_desc): ++ scope_decode_offset = Types.as_int(pc_desc['_scope_decode_offset']) ++ if scope_decode_offset == 0: ++ return [ { 'method': self.method, 'bci': 0 } ] ++ nmethod = self.nmethod ++ # debug_write("nmethod = 0x%x\n" % nmethod) ++ # debug_write("pc_desc = 0x%x\n" % Types.as_long(pc_desc)) ++ base = Types.cast_bytep(nmethod) ++ # scopes_data_offset = Types.as_int(nmethod['_scopes_data_offset']) ++ # scopes_base = base + scopes_data_offset ++ scopes_base = nmethod['_scopes_data_begin'] ++ # debug_write("scopes_base = 0x%x\n" % Types.as_long(scopes_base)) ++ metadata_offset = Types.as_int(nmethod['_metadata_offset']) ++ metadata_base = Types.to_type(base + metadata_offset, self.metadata_pp) ++ # debug_write("metadata_base = 0x%x\n" % Types.as_long(metadata_base)) ++ scope = scopes_base + scope_decode_offset ++ # debug_write("scope = 0x%x\n" % Types.as_long(scope)) ++ stream = CompressedStream(scope) ++ # debug_write("stream = %s\n" % stream) ++ sender = stream.read_int() ++ # debug_write("sender = %s\n" % sender) ++ # method name is actually in metadata ++ method_idx = stream.read_int() ++ method_md = (metadata_base + (method_idx - 1)).dereference() ++ methodptr = Types.to_type(method_md, self.method_p) ++ method = Method(methodptr) ++ # bci is offset by -1 to allow range [-1, ..., 
MAX_UINT) ++ bci = stream.read_int() - 1 ++ # debug_write("method,bci = %s,0x%x\n" % (method.get_name(), bci)) ++ result = [ { 'method': method, 'bci': bci } ] ++ while sender > 0: ++ # debug_write("\nsender = 0x%x\n" % sender) ++ stream = CompressedStream(scopes_base + sender) ++ sender = stream.read_int() ++ method_idx = stream.read_int() ++ method_md = (metadata_base + (method_idx - 1)).dereference() ++ methodptr = Types.to_type(method_md, self.method_p) ++ method = Method(methodptr) ++ # bci is offset by -1 to allow range [-1, ..., MAX_UINT) ++ bci = stream.read_int() - 1 ++ # debug_write("method,bci = %s,0x%x\n" % (method.get_name(), bci)) ++ result.append( { 'method': method, 'bci': bci } ) ++ return result ++ ++ def pc_to_method_bci_stack(self, pc): ++ # need to search unpacked pc scopes ++ if pc < self.code_begin or pc >= self.code_end: ++ return None ++ pc_off = pc - self.code_begin ++ # debug_write("\npc_off = 0x%x\n" % pc_off) ++ pc_desc = self.find_pc_desc(pc_off) ++ if pc_desc is None: ++ return None ++ return self.pc_desc_to_method_bci_stack(pc_desc) ++ ++# class which knows how to read a method's line ++# number table, translating bytecode indices to line numbers ++class LineReader: ++ # table is a gdb.Value of type 'byte *' (strictly 'u_char *' in JVM code) ++ def __init__(self, table): ++ # t("LineReader.init") ++ self.table = table ++ self.translations = {} ++ def bc_to_line(self, bci): ++ # t("LineReader.bc_to_line()") ++ try: ++ return self.translations[bci] ++ except Exception as arg: ++ line = self.compute_line(bci) ++ if line >= 0: ++ self.translations[bci] = line ++ return line ++ def compute_line(self, bci): ++ # t("LineReader.compute_line()") ++ # debug_write("table = 0x%x\n" % self.table) ++ bestline = -1 ++ self.stream = CompressedStream(self.table) ++ self._bci = 0 ++ self._line = 0 ++ while self.read_pair(): ++ nextbci = self._bci ++ nextline = self._line ++ if nextbci >= bci: ++ return nextline ++ else: ++ bestline = nextline ++ return bestline ++ def read_pair(self): ++ # t("LineReader.read_pair()") ++ next = self.stream.read() ++ # debug_write("next = 0x%x\n" % next) ++ if next == 0: ++ return False ++ if next == 0xff: ++ self._bci = self._bci + self.stream.read_signed_int() ++ self._line = self._line + self.stream.read_signed_int() ++ else: ++ self._bci = self._bci + (next >> 3) ++ self._line = self._line + (next & 0x7) ++ # debug_write("_bci = %d\n" % self._bci) ++ # debug_write("_line = %d\n" % self._line) ++ return True ++ ++# class to provide access to data relating to a Method object ++class Method(object): ++ def __init__(self, methodptr): ++ self.methodptr = methodptr; ++ self.name = None ++ const_method = self.methodptr['_constMethod'] ++ bcbase = Types.cast_bytep(const_method + 1) ++ bytecode_size = Types.as_int(const_method['_code_size']) ++ lnbase = Types.cast_bytep(bcbase + bytecode_size) ++ self.line_number_reader = LineReader(lnbase) ++ ++ def get_name(self): ++ if self.name == None: ++ self.make_name(self.methodptr) ++ return self.name ++ ++ def get_klass_path(self): ++ if self.name == None: ++ self.make_name(self.methodptr) ++ return self.klass_path ++ ++ def get_line(self, bci): ++ if bci < 0: ++ bci = 0 ++ return self.line_number_reader.bc_to_line(bci) ++ ++ def make_name(self, methodptr): ++ const_method = methodptr['_constMethod'] ++ constant_pool = const_method['_constants'] ++ constant_pool_base = Types.cast_voidp((constant_pool + 1)) ++ klass_sym = constant_pool['_pool_holder']['_name'] ++ klass_name = 
klass_sym['_body'].cast(gdb.lookup_type("char").pointer())
++        klass_name_length = int(klass_sym['_length'])
++        klass_path = CodeCache.makestr(klass_name_length, klass_name)
++        self.klass_str = klass_path.replace("/", ".")
++        dollaridx = klass_path.find('$')
++        if dollaridx >= 0:
++            klass_path = klass_path[0:dollaridx]
++        self.klass_path = klass_path + ".java"
++        method_idx = const_method['_name_index']
++        method_sym = (constant_pool_base + (8 * method_idx)).cast(gdb.lookup_type("Symbol").pointer().pointer()).dereference()
++        method_name = method_sym['_body'].cast(gdb.lookup_type("char").pointer())
++        method_name_length = int(method_sym['_length'])
++        self.method_str = CodeCache.makestr(method_name_length, method_name)
++
++        sig_idx = const_method['_signature_index']
++        sig_sym = (constant_pool_base + (8 * sig_idx)).cast(gdb.lookup_type("Symbol").pointer().pointer()).dereference()
++        sig_name = sig_sym['_body'].cast(gdb.lookup_type("char").pointer())
++        sig_name_length = int(sig_sym['_length'])
++        sig_str = CodeCache.makestr(sig_name_length, sig_name)
++        self.sig_str = self.make_sig_str(sig_str)
++        self.name = self.klass_str + "." + self.method_str + self.sig_str
++
++    def make_sig_str(self, sig):
++        in_sym_name = False
++        sig_str = ""
++        prefix=""
++        for i, c in enumerate(sig):
++            if c == "(":
++                sig_str = sig_str + c
++            elif c == ")":
++                # ignore return type
++                return sig_str + c
++            elif in_sym_name == True:
++                if c == ";":
++                    in_sym_name = False
++                elif c == "/":
++                    sig_str = sig_str + "."
++                else:
++                    sig_str = sig_str + c
++            elif c == "L":
++                sig_str = sig_str + prefix
++                prefix = ","
++                in_sym_name = True
++            elif c == "B":
++                sig_str = sig_str + prefix + "byte"
++                prefix = ","
++            elif c == "S":
++                sig_str = sig_str + prefix + "short"
++                prefix = ","
++            elif c == "C":
++                sig_str = sig_str + prefix + "char"
++                prefix = ","
++            elif c == "I":
++                sig_str = sig_str + prefix + "int"
++                prefix = ","
++            elif c == "J":
++                sig_str = sig_str + prefix + "long"
++                prefix = ","
++            elif c == "F":
++                sig_str = sig_str + prefix + "float"
++                prefix = ","
++            elif c == "D":
++                sig_str = sig_str + prefix + "double"
++                prefix = ","
++            elif c == "Z":
++                sig_str = sig_str + prefix + "boolean"
++                prefix = ","
++        return sig_str
++
++# This represents a method in a JIT frame for the purposes of
++# display. There may be more than one method associated with
++# a (compiled) frame because of inlining. The MethodInfo object
++# associated with a JIT frame creates a list of decorators.
++# Interpreted and stub MethodInfo objects insert only one
++# decorator. Compiled MethodInfo objects insert one for the
++# base method and one for each inlined method found at the
++# frame PC address.
++class OpenJDKFrameDecorator(FrameDecorator):
++    def __init__(self, base, methodname, filename, line):
++        super(FrameDecorator, self).__init__()
++        self._base = base
++        self._methodname = methodname
++        self._filename = filename
++        self._line = line
++
++    def function(self):
++        try:
++            # t("OpenJDKFrameDecorator.function")
++            return self._methodname
++        except Exception as arg:
++            gdb.write("!!! function oops !!! %s\n" % arg)
++            return None
++
++    def method_name(self):
++        return self._methodname
++
++    def filename(self):
++        try:
++            return self._filename
++        except Exception as arg:
++            gdb.write("!!! filename oops !!! %s\n" % arg)
++            return None
++
++    def line(self):
++        try:
++            return self._line
++        except Exception as arg:
++            gdb.write("!!! line oops !!! %s\n" % arg)
++            return None
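++
++# Illustrative sketch only (added commentary, not part of the original
++# script): how a decorator built above presents a frame. gdb's frame-filter
++# machinery asks a decorator for function()/filename()/line() when printing
++# a backtrace, so a decorated Java frame shows method, source file and line
++# instead of the raw JIT symbol. The method name, file and line used here
++# are invented for the example.
++def demo_decorator():
++    d = OpenJDKFrameDecorator(None, "[interpreted: bc = 3] java.lang.String.length()",
++                              "String.java", 502)
++    # these are the accessors the frame-filter machinery calls
++    return (d.function(), d.filename(), d.line())
++
++# A frame filter for OpenJDK.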
++class OpenJDKFrameFilter(object): ++ def __init__(self, unwinder): ++ self.name="OpenJDK" ++ self.enabled = True ++ self.priority = 100 ++ self.unwinder = unwinder ++ ++ def maybe_wrap_frame(self, frame): ++ # t("OpenJDKFrameFilter.maybe_wrap_frame") ++ if self.unwinder is None: ++ return [ frame ] ++ # t("unwindercache = self.unwinder.unwindercache") ++ unwindercache = self.unwinder.unwindercache ++ if unwindercache is None: ++ return [ frame ] ++ # t("base = frame.inferior_frame()") ++ base = frame.inferior_frame() ++ # t("sp = Types.as_long(base.read_register('rsp'))") ++ sp = base.read_register('rsp') ++ x = Types.as_long(sp) ++ # debug_write("@@ get info at unwindercache[0x%x]\n" % x) ++ try: ++ cache_entry = unwindercache[x] ++ except Exception as arg: ++ # n.b. no such entry throws an exception ++ # just ignore and use existing frame ++ return [ frame ] ++ try: ++ if cache_entry is None: ++ # debug_write("@@ lookup found no cache_entry\n") ++ return [ frame ] ++ elif cache_entry.codetype == "unknown": ++ # debug_write("@@ lookup found unknown cache_entry\n") ++ return [ frame ] ++ else: ++ # debug_write("@@ got cache_entry for blob 0x%x at unwindercache[0x%x]\n" % (cache_entry.blob, x)) ++ method_info = cache_entry.method_info ++ if method_info == None: ++ return [ frame ] ++ else: ++ return method_info.decorate(frame) ++ except Exception as arg: ++ gdb.write("!!! maybe_wrap_frame oops !!! %s\n" % arg) ++ return [ frame ] ++ ++ def flatten(self, list_of_lists): ++ return [x for y in list_of_lists for x in y ] ++ ++ def filter(self, frame_iter): ++ # return map(self.maybe_wrap_frame, frame_iter) ++ return self.flatten( map(self.maybe_wrap_frame, frame_iter) ) ++ ++ ++# A frame id class, as specified by the gdb unwinder API. ++class OpenJDKFrameId(object): ++ def __init__(self, sp, pc): ++ # t("OpenJDKFrameId.__init__") ++ self.sp = sp ++ self.pc = pc ++ ++# class hierarchy to record details of different code blobs ++# the class implements functionality required by the frame decorator ++ ++class MethodInfo(object): ++ def __init__(self, entry): ++ self.blob = entry.blob ++ self.pc = Types.as_long(entry.pc) ++ self.sp = Types.as_long(entry.sp) ++ if entry.bp is None: ++ self.bp = 0 ++ else: ++ self.bp = Types.as_long(entry.bp) ++ ++ def decorate(self, frame): ++ return [ frame ] ++ ++# info for stub frame ++ ++class StubMethodInfo(MethodInfo): ++ def __init__(self, entry, name): ++ super(StubMethodInfo, self).__init__(entry) ++ self.name = name ++ ++ def decorate(self, frame): ++ return [ OpenJDKFrameDecorator(frame, self.name, None, None) ] ++ ++# common info for compiled, native or interpreted frame ++class JavaMethodInfo(MethodInfo): ++ ++ def __init__(self, entry): ++ super(JavaMethodInfo, self).__init__(entry) ++ ++ def cache_method_info(self): ++ methodptr = self.get_methodptr() ++ self.method = Method(methodptr) ++ ++ ++ def get_method(self): ++ return self.method ++ ++ def method_name(self): ++ return self.get_method().get_name() ++ ++ def filename(self): ++ return self.get_method().get_klass_path() ++ ++ def decorate(self, frame): ++ return [ OpenJDKFrameDecorator(frame, self.method_name(), self.filename(), None) ] ++ ++# info for compiled frame ++ ++class CompiledMethodInfo(JavaMethodInfo): ++ ++ def __init__(self, entry): ++ # t("CompiledMethodInfo.__init__") ++ super(CompiledMethodInfo,self).__init__(entry) ++ blob = self.blob ++ cmethod = Types.to_type(blob, Types.cmethodp_t) ++ nmethod = Types.to_type(blob, Types.nmethodp_t) ++ self.methodptr = cmethod['_method'] ++ 
const_method = self.methodptr['_constMethod'] ++ bcbase = Types.cast_bytep(const_method + 1) ++ self.code_begin = Types.as_long(blob['_code_begin']) ++ # get bc and line number info from method ++ self.cache_method_info() ++ # get PC to BCI translator from the nmethod ++ self.bytecode_index_reader = MethodBCIReader(nmethod, self.method) ++ # t("self.method_bci_stack = self.bytecode_index_reader.pc_to_method_bci_stack(self.pc)") ++ self.method_bci_stack = self.bytecode_index_reader.pc_to_method_bci_stack(self.pc) ++ ++ # subclasses need to compute their method pointer ++ ++ def get_methodptr(self): ++ return self.methodptr ++ ++ def format_method_name(self, method, is_outer): ++ name = method.get_name() ++ if is_outer: ++ return ("[compiled offset=0x%x] %s" % (self.pc - self.code_begin, name)) ++ else: ++ return ("[inlined] %s" % name) ++ ++ def make_decorator(self, frame, pair, is_outer): ++ # t("make_decorator") ++ method = pair['method'] ++ bci = pair['bci'] ++ methodname = self.format_method_name(method, is_outer) ++ filename = method.get_klass_path() ++ line = method.get_line(bci) ++ if line < 0: ++ line = None ++ decorator = OpenJDKFrameDecorator(frame, methodname, filename, line) ++ return decorator ++ ++ def decorate(self, frame): ++ if self.method_bci_stack == None: ++ return [ frame ] ++ else: ++ try: ++ decorators = [] ++ pairs = self.method_bci_stack ++ # debug_write("converting method_bci_stack = %s\n" % self.method_bci_stack) ++ l = len(pairs) ++ for i in range(l): ++ pair = pairs[i] ++ # debug_write("decorating pair %s\n" % pair) ++ decorator = self.make_decorator(frame, pair, i == (l - 1)) ++ decorators.append(decorator) ++ return decorators ++ except Exception as arg: ++ gdb.write("!!! decorate oops %s !!!\n" % arg) ++ return [ frame ] ++ ++# info for native frame ++ ++class NativeMethodInfo(JavaMethodInfo): ++ ++ def __init__(self, entry): ++ # t("NativeMethodInfo.__init__") ++ super(NativeMethodInfo,self).__init__(entry) ++ blob = self.blob ++ cmethod = Types.to_type(blob, Types.cmethodp_t) ++ nmethod = Types.to_type(blob, Types.nmethodp_t) ++ self.methodptr = cmethod['_method'] ++ const_method = self.methodptr['_constMethod'] ++ bcbase = Types.cast_bytep(const_method + 1) ++ self.code_begin = Types.as_long(blob['_code_begin']) ++ # get bc and line number info from method ++ self.cache_method_info() ++ ++ # subclasses need to compute their method pointer ++ ++ def get_methodptr(self): ++ return self.methodptr ++ ++ def format_method_name(self, method): ++ name = method.get_name() ++ return ("[native offset=0x%x] %s" % (self.pc - self.code_begin, name)) ++ ++ def method_name(self): ++ return self.format_method_name(self.method) ++ ++# info for interpreted frame ++ ++class InterpretedMethodInfo(JavaMethodInfo): ++ ++ def __init__(self, entry, bcp): ++ super(InterpretedMethodInfo,self).__init__(entry) ++ # interpreter frames store methodptr in slot 3 ++ methodptr_offset = FrameConstants.interpreter_frame_method_offset() * 8 ++ methodptr_addr = gdb.Value((self.bp + methodptr_offset) & 0xffffffffffffffff) ++ methodptr_slot = methodptr_addr.cast(gdb.lookup_type("Method").pointer().pointer()) ++ self.methodptr = methodptr_slot.dereference() ++ # bytecode immediately follows const method ++ const_method = self.methodptr['_constMethod'] ++ bcbase = Types.cast_bytep(const_method + 1) ++ # debug_write("@@ bcbase = 0x%x\n" % Types.as_long(bcbase)) ++ bcp_offset = FrameConstants.interpreter_frame_bcp_offset() * 8 ++ if bcp is None: ++ # interpreter frames store bytecodeptr in slot 8 
++            bcp_addr = gdb.Value((self.bp + bcp_offset) & 0xffffffffffffffff)
++            bcp_val = Types.cast_bytep(Types.load_ptr(bcp_addr))
++        else:
++            bcp_val = Types.cast_bytep(bcp)
++            # sanity check the register value -- it is sometimes off
++            # when returning from a call out
++            bcoff = Types.as_long(bcp_val - bcbase)
++            if bcoff < 0 or bcoff >= 0x10000:
++                # use the value in the frame slot
++                bcp_addr = gdb.Value((self.bp + bcp_offset) & 0xffffffffffffffff)
++                bcp_val = Types.cast_bytep(Types.load_ptr(bcp_addr))
++        self.bcoff = Types.as_long(bcp_val - bcbase)
++        # debug_write("@@ bcoff = 0x%x\n" % self.bcoff)
++        # line number table immediately following bytecode
++        bytecode_size = Types.as_int(const_method['_code_size'])
++        self.is_native = (bytecode_size == 0)
++        # n.b. data in compressed line_number_table block is u_char
++        # debug_write("bytecode_size = 0x%x\n" % bytecode_size)
++        lnbase = Types.cast_bytep(bcbase + bytecode_size)
++        # debug_write("lnbase = 0x%x\n" % Types.as_long(lnbase))
++        self.line_number_reader = LineReader(lnbase)
++        self.cache_method_info()
++
++    def get_methodptr(self):
++        return self.methodptr
++
++    def format_method_name(self, method):
++        name = method.get_name()
++        if self.is_native:
++            return "[native] " + name
++        else:
++            return ("[interpreted: bc = %d] " % self.bcoff) + name
++
++    def line(self):
++        line = self.line_number_reader.bc_to_line(self.bcoff)
++        # debug_write("bc_to_line(%d) = %d\n" % (self.bcoff, line))
++        if line < 0:
++            line = None
++        return line
++
++    def decorate(self, frame):
++        method = self.get_method()
++        return [ OpenJDKFrameDecorator(frame, self.format_method_name(method), method.get_klass_path(), self.line()) ]
++
++
++class OpenJDKUnwinderCacheEntry(object):
++    def __init__(self, blob, sp, pc, bp, bcp, name, codetype):
++        # t("OpenJDKUnwinderCacheEntry.__init__")
++        self.blob = blob
++        self.sp = sp
++        self.pc = pc
++        self.bp = bp
++        self.codetype = codetype
++        try:
++            if codetype == "compiled":
++                self.method_info = CompiledMethodInfo(self)
++            elif codetype == "native":
++                self.method_info = NativeMethodInfo(self)
++            elif codetype == "interpreted":
++                self.method_info = InterpretedMethodInfo(self, bcp)
++            elif codetype == "stub":
++                self.method_info = StubMethodInfo(self, name)
++            else:
++                self.method_info = None
++        except Exception as arg:
++            gdb.write("!!! failed to cache info for %s frame [pc: 0x%x sp:0x%x bp 0x%x] !!!\n!!! %s !!!\n" % (codetype, pc, sp, bp, arg))
++            self.method_info = None
++
++# an unwinder class, an instance of which can be registered with gdb
++# to handle unwinding of frames.
++
++class OpenJDKUnwinder(Unwinder):
++    def __init__(self):
++        # t("OpenJDKUnwinder.__init__")
++        super(OpenJDKUnwinder, self).__init__("OpenJDKUnwinder")
++        # blob name will be in format '0xHexDigits "AlphaNumSpaces"'
++        self.matcher = re.compile('^0x[a-fA-F0-9]+ "(.*)"$')
++        self.unwindercache = {}
++        self.invocations = {}
++
++    # the method that gets called by the pyuw_sniffer
++    def __call__(self, pending_frame):
++        # sometimes when we call into python gdb routines
++        # the call tries to re-establish the frame and ends
++        # up calling the frame sniffer recursively
++        #
++        # so use a dict keyed by thread to avoid recursive calls
++        # t("OpenJDKUnwinder.__call__")
++        thread = gdb.selected_thread()
++        if self.invocations.get(thread) is not None:
++            # debug_write("!!! blocked %s !!!\n" % str(thread))
++            return None
++        try:
++            # debug_write("!!! blocking %s !!!\n" % str(thread))
++            self.invocations[thread] = thread
++            result = self.call_sub(pending_frame)
++            # debug_write("!!! unblocking %s !!!\n" % str(thread))
++            self.invocations[thread] = None
++            return result
++        except Exception as arg:
++            gdb.write("!!! __call__ oops %s !!!\n" % arg)
++            # debug_write("!!! unblocking %s !!!\n" % str(thread))
++            self.invocations[thread] = None
++            return None
++
++    def call_sub(self, pending_frame):
++        # t("OpenJDKUnwinder.__call_sub__")
++        # debug_write("@@ reading pending frame registers\n")
++        pc = pending_frame.read_register('rip')
++        # debug_write("@@ pc = 0x%x\n" % Types.as_long(pc))
++        sp = pending_frame.read_register('rsp')
++        # debug_write("@@ sp = 0x%x\n" % Types.as_long(sp))
++        bp = pending_frame.read_register('rbp')
++        # debug_write("@@ bp = 0x%x\n" % Types.as_long(bp))
++        try:
++            if not CodeCache.inrange(pc):
++                # t("not CodeCache.inrange(0x%x)\n" % pc)
++                return None
++        except Exception as arg:
++            # debug_write("@@ %s\n" % arg)
++            return None
++        if _dump_frame:
++            debug_write(" pc = 0x%x\n" % Types.as_long(pc))
++            debug_write(" sp = 0x%x\n" % Types.as_long(sp))
++            debug_write(" bp = 0x%x\n" % Types.as_long(bp))
++        # this is a Java frame so we need to return unwind info
++        #
++        # the interpreter keeps the bytecode pointer (bcp) in a
++        # register and saves it to the stack when it makes a call to
++        # another Java method or to C++. if the current frame has a
++        # bcp register value then we pass it in case the pending frame
++        # is an interpreter frame. that ensures we use the most up to
++        # date bcp when the inferior has stopped at the top level in a
++        # Java interpreted frame. it also saves us doing a redundant
++        # stack read when the pending frame sits below a non-JITted
++        # (C++) frame. n.b. if the current frame is a JITted frame
++        # (i.e. one that we have already unwound) then rbp will not be
++        # present. that's ok because the frame decorator can still
++        # find the latest bcp value on the stack.
++        bcp = pending_frame.read_register('r13')
++        try:
++            # convert returned value to a python int to force a check that
++            # the register is defined. if not this will except
++            bcp = gdb.Value(int(bcp))
++            # debug_write("@@ bcp = 0x%x\n" % Types.as_long(bcp))
++        except Exception as arg:
++            # debug_write("@@ !!! call_sub oops %s !!! \n" % arg)
++            bcp = None
++            # debug_write("@@ bcp = None\n")
++        # t("blob = CodeCache.findblob(pc)")
++        blob = CodeCache.findblob(pc)
++        # t("if blob is None:")
++        if blob is None:
++            # t("return None")
++            return None
++        # if the blob is an nmethod then we use the frame
++        # size to identify the frame base otherwise we
++        # use the value in rbp
++        # t("name = str(blob['_name'])")
++        name = str(blob['_name'])
++        # blob name will be in format '0xHexDigits "AlphaNumSpaces"'
++        # and we just want the bit between the quotes
++        m = self.matcher.match(name)
++        if m is not None:
++            # debug_write("@@ m.group(1) == %s\n" % m.group(1))
++            name = m.group(1)
++        if name == "nmethod":
++            # debug_write("@@ compiled %s\n" % name)
++            codetype = "compiled"
++            # TODO -- need to check if frame is complete
++            # i.e. if ((char *)pc - (char *)blob) > blob['_code_begin'] + blob['_frame_complete_offset']
++            # if not then we have not pushed a frame.
++            # what do we do then? use SP as BP???
++            frame_size = blob['_frame_size']
++            # debug_write("@@ frame_size = 0x%x\n" % int(frame_size))
++            # n.b. frame_size includes stacked rbp and rip hence the -2
++            bp = sp + ((frame_size - 2) * 8)
++            # debug_write("@@ revised bp = 0x%x\n" % Types.as_long(bp))
++        elif name == "native nmethod":
++            # debug_write("@@ native %s\n" % name)
++            codetype = "native"
++        elif name == "Interpreter":
++            # debug_write("@@ interpreted %s\n" % name)
++            codetype = "interpreted"
++        elif name[:4] == "Stub":
++            # debug_write("@@ stub %s\n" % name)
++            codetype = "stub"
++        else:
++            # debug_write("@@ unknown %s\n" % name)
++            codetype = "unknown"
++        # cache details of the current frame
++        x = Types.as_long(sp)
++        # debug_write("@@ add %s cache entry for blob 0x%x at unwindercache[0x%x]\n" % (codetype, blob, x))
++        self.unwindercache[x] = OpenJDKUnwinderCacheEntry(blob, sp, pc, bp, bcp, name, codetype)
++        # t("next_bp = Types.load_long(bp)")
++        next_bp = Types.load_long(bp)
++        # t("next_pc = Types.load_long(bp + 8)")
++        next_pc = Types.load_long(bp + 8)
++        # next_sp is normally just 2 words below current bp
++        # but for interpreted frames we need to skip locals
++        # so we pull caller_sp from the frame
++        if codetype == "interpreted":
++            interpreter_frame_sender_sp_offset = FrameConstants.interpreter_frame_sender_sp_offset() * 8
++            # interpreter frames store sender sp in slot 1
++            next_sp = Types.load_long(bp + interpreter_frame_sender_sp_offset)
++        else:
++            sender_sp_offset = FrameConstants.sender_sp_offset() * 8
++            next_sp = bp + sender_sp_offset
++        # create unwind info for this frame
++        # t("frameid = OpenJDKFrameId(...)")
++        frameid = OpenJDKFrameId(Types.to_voidp(next_sp),
++                                 Types.to_type(next_pc, pc.type))
++        # debug_write("@@ created frame id\n")
++        # t("unwind_info = pending_frame.create_unwind_info(frameid)")
++        unwind_info = pending_frame.create_unwind_info(frameid)
++        # debug_write("@@ created unwind info\n")
++        # debug_write("@@ next_bp = 0x%x\n" % next_bp)
++        # debug_write("@@ next_pc = 0x%x\n" % next_pc)
++        # debug_write("@@ next_sp = 0x%x\n" % next_sp)
++        # we must calculate pc, sp and bp.
++        #
++        # for now we only add the minimum of registers that we know
++        # are valid.
++        unwind_info.add_saved_register('rip', next_pc)
++        unwind_info.add_saved_register('rsp', next_sp)
++        unwind_info.add_saved_register('rbp', next_bp)
++        if _dump_frame:
++            debug_write("next pc = 0x%x\n" % Types.as_long(next_pc))
++            debug_write("next sp = 0x%x\n" % Types.as_long(next_sp))
++            debug_write("next bp = 0x%x\n" % Types.as_long(next_bp))
++        # t("return unwind_info")
++        return unwind_info
++
++# register the unwinder globally [probably really needs to be
++# registered solely with libjvm.so]
++def register_unwinder():
++    unwinder = None
++    if _have_unwinder:
++        unwinder = OpenJDKUnwinder()
++        gdb.unwinder.register_unwinder(None, unwinder, replace=True)
++        filt = OpenJDKFrameFilter(unwinder)
++        gdb.frame_filters[filt.name] = filt
++
++register_unwinder()
diff --cc debian/font.properties.ja
index 000000000,000000000..91916715f
new file mode 100644
--- /dev/null
+++ b/debian/font.properties.ja
@@@ -1,0 -1,0 +1,203 @@@
++# @(#)font.properties.ja.linux 1.7 02/03/07
++#
++# Copyright 2002 Sun Microsystems, Inc. All rights reserved.
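++#
++# n.b. each entry below is an XLFD pattern of the form
++# -foundry-family-weight-slant-setwidth-addstyle-pixelsize-pointsize-
++# resx-resy-spacing-avgwidth-registry-encoding, where %d is filled in
++# with the requested size at runtime and the trailing registry-encoding
++# (e.g. jisx0208.1983-0) names the charset the component font covers.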
++# ++ ++# Component Font Mappings ++# ++serif.0=-b&h-lucidabright-medium-r-normal--*-%d-*-*-p-*-iso8859-1 ++serif.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++serif.italic.0=-b&h-lucidabright-medium-i-normal--*-%d-*-*-p-*-iso8859-1 ++serif.italic.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++serif.bold.0=-b&h-lucidabright-demibold-r-normal--*-%d-*-*-p-*-iso8859-1 ++serif.bold.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++serif.bolditalic.0=-b&h-lucidabright-demibold-i-normal--*-%d-*-*-p-*-iso8859-1 ++serif.bolditalic.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++sansserif.0=-b&h-lucidasans-medium-r-normal-sans-*-%d-*-*-p-*-iso8859-1 ++sansserif.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++sansserif.italic.0=-b&h-lucidasans-medium-i-normal-sans-*-%d-*-*-p-*-iso8859-1 ++sansserif.italic.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++sansserif.bold.0=-b&h-lucidasans-bold-r-normal-sans-*-%d-*-*-p-*-iso8859-1 ++sansserif.bold.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++sansserif.bolditalic.0=-b&h-lucidasans-bold-i-normal-sans-*-%d-*-*-p-*-iso8859-1 ++sansserif.bolditalic.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++monospaced.0=-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++monospaced.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++monospaced.italic.0=-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++monospaced.italic.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++monospaced.bold.0=-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++monospaced.bold.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++monospaced.bolditalic.0=-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++monospaced.bolditalic.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialog.0=-b&h-lucidasans-medium-r-normal-sans-*-%d-*-*-p-*-iso8859-1 ++dialog.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialog.italic.0=-b&h-lucidasans-medium-i-normal-sans-*-%d-*-*-p-*-iso8859-1 ++dialog.italic.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialog.bold.0=-b&h-lucidasans-bold-r-normal-sans-*-%d-*-*-p-*-iso8859-1 ++dialog.bold.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialog.bolditalic.0=-b&h-lucidasans-bold-i-normal-sans-*-%d-*-*-p-*-iso8859-1 ++dialog.bolditalic.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialoginput.0=-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++dialoginput.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialoginput.italic.0=-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++dialoginput.italic.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialoginput.bold.0=-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++dialoginput.bold.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialoginput.bolditalic.0=-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++dialoginput.bolditalic.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++# Missing Glyph Character ++# ++default.char=274f ++ ++# Component Font Character Encodings ++# ++fontcharset.serif.0=sun.io.CharToByteISO8859_1 ++fontcharset.serif.1=sun.awt.motif.CharToByteX11JIS0208 ++ ++fontcharset.sansserif.0=sun.io.CharToByteISO8859_1 
++fontcharset.sansserif.1=sun.awt.motif.CharToByteX11JIS0208 ++ ++fontcharset.monospaced.0=sun.io.CharToByteISO8859_1 ++fontcharset.monospaced.1=sun.awt.motif.CharToByteX11JIS0208 ++ ++fontcharset.dialog.0=sun.io.CharToByteISO8859_1 ++fontcharset.dialog.1=sun.awt.motif.CharToByteX11JIS0208 ++ ++fontcharset.dialoginput.0=sun.io.CharToByteISO8859_1 ++fontcharset.dialoginput.1=sun.awt.motif.CharToByteX11JIS0208 ++ ++# Exclusion Ranges ++# ++ ++# XFontSet Information ++# ++fontset.serif.plain=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidabright-medium-r-normal--*-%d-*-*-p-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.serif.italic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidabright-medium-i-normal--*-%d-*-*-p-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.serif.bold=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidabright-demibold-r-normal--*-%d-*-*-p-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.serif.bolditalic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidabright-demibold-i-normal--*-%d-*-*-p-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.sansserif.plain=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-medium-i-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.sansserif.italic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-medium-i-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.sansserif.bold=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-bold-r-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.sansserif.bolditalic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-bold-i-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.monospaced.plain=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.monospaced.italic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.monospaced.bold=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.monospaced.bolditalic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialog.italic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-medium-i-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialog.bold=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-bold-r-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialog.bolditalic=\ 
++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-bold-i-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialog.plain=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-medium-r-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialoginput.italic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialoginput.bold=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialoginput.bolditalic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialoginput.plain=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.default=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-medium-r-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 diff --cc debian/font.properties.ja.wqy_zenhai index 000000000,000000000..119822d83 new file mode 100644 --- /dev/null +++ b/debian/font.properties.ja.wqy_zenhai @@@ -1,0 -1,0 +1,244 @@@ ++# @(#)font.properties.ja.linux 1.7 02/03/07 ++# ++# Copyright 2002 Sun Microsystems, Inc. All rights reserved. 
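++#
++# n.b. despite the wqy_zenhai suffix, this variant extends each logical
++# font with a third component slot (*.2, the sazanami families) plus the
++# matching fontset entries; component slots are consulted in index
++# order, so the *.2 fonts only supply glyphs the earlier slots lack.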
++# ++ ++# Component Font Mappings ++# ++serif.0=-b&h-lucidabright-medium-r-normal--*-%d-*-*-p-*-iso8859-1 ++serif.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++serif.2=-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++serif.italic.0=-b&h-lucidabright-medium-i-normal--*-%d-*-*-p-*-iso8859-1 ++serif.italic.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++serif.italic.2=-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++serif.bold.0=-b&h-lucidabright-demibold-r-normal--*-%d-*-*-p-*-iso8859-1 ++serif.bold.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++serif.bold.2=-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++serif.bolditalic.0=-b&h-lucidabright-demibold-i-normal--*-%d-*-*-p-*-iso8859-1 ++serif.bolditalic.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++serif.bolditalic.2=-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++sansserif.0=-b&h-lucidasans-medium-r-normal-sans-*-%d-*-*-p-*-iso8859-1 ++sansserif.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++sansserif.2=-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++sansserif.italic.0=-b&h-lucidasans-medium-i-normal-sans-*-%d-*-*-p-*-iso8859-1 ++sansserif.italic.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++sansserif.italic.2=-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++sansserif.bold.0=-b&h-lucidasans-bold-r-normal-sans-*-%d-*-*-p-*-iso8859-1 ++sansserif.bold.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++sansserif.bold.2=-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++sansserif.bolditalic.0=-b&h-lucidasans-bold-i-normal-sans-*-%d-*-*-p-*-iso8859-1 ++sansserif.bolditalic.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++sansserif.bolditalic.2=-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++monospaced.0=-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++monospaced.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++monospaced.2=-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++monospaced.italic.0=-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++monospaced.italic.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++monospaced.italic.2=-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++monospaced.bold.0=-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++monospaced.bold.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++monospaced.bold.2=-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++monospaced.bolditalic.0=-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++monospaced.bolditalic.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++monospaced.bolditalic.2=-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialog.0=-b&h-lucidasans-medium-r-normal-sans-*-%d-*-*-p-*-iso8859-1 ++dialog.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++dialog.2=-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialog.italic.0=-b&h-lucidasans-medium-i-normal-sans-*-%d-*-*-p-*-iso8859-1 ++dialog.italic.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++dialog.italic.2=-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialog.bold.0=-b&h-lucidasans-bold-r-normal-sans-*-%d-*-*-p-*-iso8859-1 
++dialog.bold.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++dialog.bold.2=-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialog.bolditalic.0=-b&h-lucidasans-bold-i-normal-sans-*-%d-*-*-p-*-iso8859-1 ++dialog.bolditalic.1=-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++dialog.bolditalic.2=-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialoginput.0=-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++dialoginput.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++dialoginput.2=-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialoginput.italic.0=-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++dialoginput.italic.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++dialoginput.italic.2=-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialoginput.bold.0=-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++dialoginput.bold.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++dialoginput.bold.2=-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++dialoginput.bolditalic.0=-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1 ++dialoginput.bolditalic.1=-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++dialoginput.bolditalic.2=-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++# Missing Glyph Character ++# ++default.char=274f ++ ++# Component Font Character Encodings ++# ++fontcharset.serif.0=sun.io.CharToByteISO8859_1 ++fontcharset.serif.1=sun.awt.motif.CharToByteX11JIS0208 ++fontcharset.serif.2=sun.awt.motif.CharToByteX11JIS0208 ++ ++fontcharset.sansserif.0=sun.io.CharToByteISO8859_1 ++fontcharset.sansserif.1=sun.awt.motif.CharToByteX11JIS0208 ++fontcharset.sansserif.2=sun.awt.motif.CharToByteX11JIS0208 ++ ++fontcharset.monospaced.0=sun.io.CharToByteISO8859_1 ++fontcharset.monospaced.1=sun.awt.motif.CharToByteX11JIS0208 ++fontcharset.monospaced.2=sun.awt.motif.CharToByteX11JIS0208 ++ ++fontcharset.dialog.0=sun.io.CharToByteISO8859_1 ++fontcharset.dialog.1=sun.awt.motif.CharToByteX11JIS0208 ++fontcharset.dialog.2=sun.awt.motif.CharToByteX11JIS0208 ++ ++fontcharset.dialoginput.0=sun.io.CharToByteISO8859_1 ++fontcharset.dialoginput.1=sun.awt.motif.CharToByteX11JIS0208 ++fontcharset.dialoginput.2=sun.awt.motif.CharToByteX11JIS0208 ++ ++# Exclusion Ranges ++# ++ ++# XFontSet Information ++# ++fontset.serif.plain=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidabright-medium-r-normal--*-%d-*-*-p-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.serif.italic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidabright-medium-i-normal--*-%d-*-*-p-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.serif.bold=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidabright-demibold-r-normal--*-%d-*-*-p-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.serif.bolditalic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidabright-demibold-i-normal--*-%d-*-*-p-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ 
++-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.sansserif.plain=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-medium-i-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.sansserif.italic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-medium-i-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.sansserif.bold=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-bold-r-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.sansserif.bolditalic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-bold-i-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.monospaced.plain=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.monospaced.italic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.monospaced.bold=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.monospaced.bolditalic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialog.italic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-medium-i-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialog.bold=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-bold-r-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialog.bolditalic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-bold-i-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialog.plain=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-medium-r-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-gothic-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialoginput.italic=\ 
++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialoginput.bold=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialoginput.bolditalic=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-bold-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.dialoginput.plain=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucidatypewriter-medium-r-normal-sans-*-%d-*-*-m-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ ++fontset.default=\ ++-sony-fixed-medium-r-normal--*-%d-*-*-c-*-jisx0201.1976-0,\ ++-b&h-lucida-medium-r-normal-sans-*-%d-*-*-p-*-iso8859-1,\ ++-kochi-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0,\ ++-sazanami-mincho-medium-r-normal--*-%d-*-*-c-*-jisx0208.1983-0 ++ diff --cc debian/generate-debian-orig.sh index 000000000,000000000..9ee8ac31d new file mode 100644 --- /dev/null +++ b/debian/generate-debian-orig.sh @@@ -1,0 -1,0 +1,68 @@@ ++#!/bin/sh ++ ++tarballs="corba.tar.gz hotspot.tar.gz jaxp.tar.gz jaxws.tar.gz jdk.tar.gz langtools.tar.gz openjdk.tar.gz" ++# AArch64 hotspot ++aarch64_hsname=f50993b6c38d ++tarballs="$tarballs $aarch64_hsname.tar.gz" ++jamvmtb=jamvm-ac22c9948434e528ece451642b4ebde40953ee7e.tar.gz ++tarballdir=7u55 ++version=7u55-2.4.7 ++base=openjdk-7 ++pkgdir=$base-$version ++origtar=${base}_${version}.orig.tar.gz ++ ++icedtea_checkout=icedtea7-2.4.7 ++debian_checkout=openjdk7 ++ ++if [ -d $pkgdir ]; then ++ echo directory $pkgdir already exists ++ exit 1 ++fi ++ ++if [ -d $pkgdir.orig ]; then ++ echo directory $pkgdir.orig already exists ++ exit 1 ++fi ++ ++if [ -f $origtar ]; then ++ echo "Using existing $origtar" ++ tar xf $origtar ++ if [ -d $pkgdir.orig ]; then ++ mv $pkgdir.orig $pkgdir ++ fi ++ tar -c -f - -C $icedtea_checkout . | tar -x -f - -C $pkgdir ++ rm -rf $pkgdir/.hg ++else ++ echo "Creating new $pkgdir.orig/" ++ rm -rf $pkgdir.orig ++ mkdir -p $pkgdir.orig ++ case "$base" in ++ openjdk*) ++ for i in $tarballs; do ++ cp -p $tarballdir/$i $pkgdir.orig/ ++ done ++ cp -p $tarballdir/$jamvmtb $pkgdir.orig/ ++ ;; ++ esac ++ tar -c -f - -C $icedtea_checkout . | tar -x -f - -C $pkgdir.orig ++ ( ++ cd $pkgdir.orig ++ sh autogen.sh ++ rm -rf autom4te.cache ++ ) ++ cp -a $pkgdir.orig $pkgdir ++ rm -rf $pkgdir.orig/.hg ++fi ++ ++echo "Build debian diff in $pkgdir/" ++cp -a $debian_checkout $pkgdir/debian ++( ++ cd $pkgdir ++ #bash debian/update-shasum.sh ++ #bash debian/update-hgrev.sh ++ ls ++ patch -p1 < debian/patches/icedtea-patch.diff ++ sh autogen.sh ++ rm -rf autom4te.cache ++ echo "aarch64 http://openjdk.linaro.org/hotspot $aarch64_hsname $(sha256sum $aarch64_hsname.tar.gz) $aarch64_hsname.tar.gz" >> hotspot.map ++) diff --cc debian/generate-dfsg-zip.sh index 000000000,000000000..200be9485 new file mode 100644 --- /dev/null +++ b/debian/generate-dfsg-zip.sh @@@ -1,0 -1,0 +1,60 @@@ ++#!/bin/sh ++ ++if [ ! 
-f "$1" ]; then ++ echo "does not exist: $1" ++ exit 1 ++fi ++ ++jdkdir=$(tar tf $1 | head -1 | sed 's,/.*,,') ++echo $jdkdir ++ ++dist=$(lsb_release -is) ++ ++echo "not needed anymore. 2.4.4 has both jdk and langtools cleaned up" ++exit 1 ++ ++# Untar openjdk source zip. ++rm -rf openjdk ++case "$1" in ++ *.zip) unzip -q -x $1 ;; ++ *.tar*) tar xf $1;; ++esac ++ ++case "$1" in ++ *jdk*) ++ echo "Cleanup jdk tarball..." ++ # Remove J2DBench sources, some of which have questionable license ++ # headers. ++ rm -rf \ ++ $jdkdir/src/share/demo/java2d/J2DBench ++ ++ # binary files ++ rm -f \ ++ $jdkdir/test/sun/net/idn/*.spp ++ ++ rm -rf \ ++ $jdkdir/test/sun/security/pkcs11/nss/lib/* ++ ++ rm -f \ ++ $jdkdir/test/java/util/Locale/data/deflocale.sh \ ++ $jdkdir/test/java/util/Locale/data/deflocale.rhel5 \ ++ $jdkdir/test/java/util/Locale/data/deflocale.rhel5.fmtasdefault \ ++ $jdkdir/test/java/util/Locale/data/deflocale.sol10.fmtasdefault \ ++ $jdkdir/test/java/util/Locale/data/deflocale.win7 \ ++ $jdkdir/test/java/util/Locale/data/deflocale.win7.fmtasdefault ++;; ++ ++ *langtools*) ++ echo "Cleanup langtools tarball..." ++ rm -rf $jdkdir/test/tools/javac/T5090006/broken.jar ++;; ++ ++esac ++ ++# Create new zip with new name. ++ ++NEW_ZIP=$(echo $1 | sed -e 's/\.tar.gz/-dfsg.tar.gz/;s/\.zip/-dfsg.tar.gz/') ++GZIP=-9v tar -cz -f $NEW_ZIP $jdkdir ++ ++# Remove old unzipped openjdk dir. ++rm -rf $jdkdir diff --cc debian/jvm.cfg-default index 000000000,000000000..d886ba82a new file mode 100644 --- /dev/null +++ b/debian/jvm.cfg-default @@@ -1,0 -1,0 +1,5 @@@ ++# jvm.cfg-default - default jvm configuration which is used if no jvm.cfg ++# is found. After unpacking the file jvm.cfg doesn't exist yet (config file), ++# in this case use this file instead, so that the VM can be used before ++# configuration of the runtime package. 
++-server KNOWN diff --cc debian/mauve_tests index 000000000,000000000..edba589aa new file mode 100644 --- /dev/null +++ b/debian/mauve_tests @@@ -1,0 -1,0 +1,2570 @@@ ++javax.xml.xpath.XPath ++javax.xml.parsers.DocumentBuilder.PR27864 ++javax.xml.parsers.DocumentBuilder.parseSimpleXML ++javax.crypto.spec.TestOfPBEKeySpec ++javax.crypto.spec.TestOfSecretKeySpec ++javax.accessibility.AccessibleContext.getAccessibleRelationSet ++javax.imageio.plugins.jpeg.TestJPEGImageReadParam ++javax.imageio.plugins.jpeg.TestJPEGHuffmanTable ++javax.imageio.plugins.jpeg.TestJPEGQTable ++javax.imageio.stream.IIOByteBuffer.setOffset ++javax.imageio.stream.IIOByteBuffer.setLength ++javax.imageio.stream.IIOByteBuffer.setData ++javax.imageio.spi.ServiceRegistry.getServiceProviderByClass ++javax.imageio.spi.ServiceRegistry.deregisterAll ++javax.imageio.spi.ServiceRegistry.getCategories ++javax.imageio.spi.ServiceRegistry.lookupProviders ++javax.imageio.spi.ServiceRegistry.registerServiceProvider ++javax.imageio.spi.IIOServiceProvider.getVersion ++javax.imageio.spi.IIOServiceProvider.getVendorName ++javax.imageio.spi.ImageOutputStreamSpi.ImageOutputStreamSpi ++javax.imageio.spi.ImageOutputStreamSpi.needsCacheFile ++javax.imageio.spi.ImageOutputStreamSpi.canUseCacheFile ++javax.imageio.spi.ImageReaderWriterSpi.getNativeImageMetadataFormatName ++javax.imageio.spi.ImageReaderWriterSpi.getFileSuffixes ++javax.imageio.spi.ImageReaderWriterSpi.getExtraImageMetadataFormatNames ++javax.imageio.spi.ImageReaderWriterSpi.getPluginClassName ++javax.imageio.spi.ImageReaderWriterSpi.getNativeStreamMetadataFormatName ++javax.imageio.spi.ImageReaderWriterSpi.getExtraStreamMetadataFormatNames ++javax.imageio.spi.ImageReaderWriterSpi.getMIMETypes ++javax.imageio.spi.ImageReaderWriterSpi.isStandardImageMetadataFormatSupported ++javax.imageio.spi.ImageReaderWriterSpi.getFormatNames ++javax.imageio.spi.ImageReaderWriterSpi.isStandardStreamMetadataFormatSupported ++javax.imageio.spi.ImageInputStreamSpi.ImageInputStreamSpi ++javax.imageio.spi.ImageInputStreamSpi.needsCacheFile ++javax.imageio.spi.ImageInputStreamSpi.canUseCacheFile ++javax.imageio.spi.ImageTranscoderSpi.ImageTranscoderSpi ++javax.imageio.spi.IIORegistry.getDefaultInstance ++javax.naming.CompoundName.simple ++javax.naming.directory.BasicAttribute.Enumerate ++javax.naming.CompositeName.composite ++javax.management.ObjectName.apply ++javax.management.ObjectName.Parsing ++javax.management.ObjectName.ParsingJDK6 ++javax.management.ObjectName.applyJDK6 ++javax.management.openmbean.ArrayType.IsValue ++javax.management.openmbean.ArrayType.Constructor2 ++javax.management.openmbean.ArrayType.Equals ++javax.management.openmbean.ArrayType.IsPrimitiveArray ++javax.management.openmbean.ArrayType.GetArrayType ++javax.management.openmbean.ArrayType.HashCode ++javax.management.openmbean.ArrayType.GetPrimitiveArrayType ++javax.management.openmbean.ArrayType.Constructor1 ++javax.management.openmbean.CompositeDataInvocationHandler.Test ++javax.management.MBeanServerPermission.Constructor ++javax.security.auth.login.TestOfPR25202 ++javax.print.SimpleDoc.constructor ++javax.print.SimpleDoc.getStreamForBytes ++javax.print.SimpleDoc.getAttributes ++javax.print.SimpleDoc.getReaderForText ++javax.print.DocFlavor.parseMimeType ++javax.print.attribute.TextSyntax.constructors ++javax.print.attribute.ResolutionSyntax.simple ++javax.print.attribute.EnumSyntax.serialize ++javax.print.attribute.EnumSyntax.equals ++javax.print.attribute.AttributeSetUtilities.simple 
++javax.print.attribute.Size2DSyntax.simple ++javax.print.attribute.SetOfIntegerSyntax.Simple ++javax.print.attribute.HashAttributeSet.populatedSet ++javax.print.attribute.HashAttributeSet.nullTests ++javax.print.attribute.HashAttributeSet.emptySet ++javax.print.attribute.standard.MediaSize.userClass ++javax.swing.JWindow.isRootPaneCheckingEnabled ++javax.swing.JSplitPane.setComponent ++javax.swing.JSplitPane.setResizeWeight ++javax.swing.JSplitPane.getDividerLocation ++javax.swing.JSplitPane.getInputMap ++javax.swing.JSplitPane.getActionMap ++javax.swing.JSplitPane.paramString ++javax.swing.JSplitPane.Constructor ++javax.swing.JOptionPane.getInputMap ++javax.swing.JTabbedPane.getInputMap ++javax.swing.JTabbedPane.Mnemonic ++javax.swing.JTabbedPane.remove ++javax.swing.JToggleButton.constructor ++javax.swing.JToggleButton.actionEvent ++javax.swing.JToggleButton.isFocusable ++javax.swing.JToggleButton.uidelegate ++javax.swing.JToggleButton.getActionCommand ++javax.swing.JToggleButton.model ++javax.swing.JToggleButton.click ++javax.swing.text.View.getResizeWeight ++javax.swing.text.View.getMaximumSpan ++javax.swing.text.View.getMinimumSpan ++javax.swing.text.View.getAlignment ++javax.swing.text.GapContent.getString ++javax.swing.text.GapContent.GapContentTest ++javax.swing.text.GapContent.length ++javax.swing.text.GapContent.constructors ++javax.swing.text.GapContent.insertString ++javax.swing.text.GapContent.getChars ++javax.swing.text.GapContent.PositionTest ++javax.swing.text.GapContent.remove ++javax.swing.text.ElementIterator.ElementIteratorTest ++javax.swing.text.AbstractDocument.BranchElement.getStartOffset ++javax.swing.text.AbstractDocument.BranchElement.getElementIndexNullPointer ++javax.swing.text.AbstractDocument.BranchElement.BranchElementTest ++javax.swing.text.AbstractDocument.BranchElement.getElementIndex ++javax.swing.text.AbstractDocument.LeafElement.getStartOffset ++javax.swing.text.AbstractDocument.getDocumentProperties ++javax.swing.text.AbstractDocument.ElementChange2 ++javax.swing.text.AbstractDocument.AbstractDocumentTest ++javax.swing.text.AbstractDocument.ElementChange ++javax.swing.text.AbstractDocument.filterTest ++javax.swing.text.MaskFormatter.MaskFormatterTest ++javax.swing.text.DefaultFormatter.getValueClass ++javax.swing.text.PlainDocument.getRootElements ++javax.swing.text.PlainDocument.getDocumentProperties ++javax.swing.text.PlainDocument.removeJoinesLines ++javax.swing.text.PlainDocument.multipleLeafs ++javax.swing.text.PlainDocument.getText ++javax.swing.text.PlainDocument.getLength ++javax.swing.text.PlainDocument.createPosition ++javax.swing.text.PlainDocument.insertUpdate ++javax.swing.text.PlainDocument.insertString ++javax.swing.text.PlainDocument.PlainDocumentTest ++javax.swing.text.AttributeSet.isEqual ++javax.swing.text.Segment.clone ++javax.swing.text.Segment.first ++javax.swing.text.Segment.setIndex ++javax.swing.text.Segment.last ++javax.swing.text.Segment.getIndex ++javax.swing.text.Segment.current ++javax.swing.text.Segment.next ++javax.swing.text.Segment.isPartialReturn ++javax.swing.text.Segment.getEndIndex ++javax.swing.text.Segment.setPartialReturn ++javax.swing.text.Segment.getBeginIndex ++javax.swing.text.Segment.constructors ++javax.swing.text.Segment.toString ++javax.swing.text.Segment.previous ++javax.swing.text.TextAction.augmentList ++javax.swing.text.TabSet.getTab ++javax.swing.text.TabSet.constructor ++javax.swing.text.TabSet.getTabCount ++javax.swing.text.TabSet.getTabIndex ++javax.swing.text.TabSet.getTabIndexAfter 
++javax.swing.text.TabSet.equals ++javax.swing.text.TabSet.toString ++javax.swing.text.html.HTML.HTML_Test ++javax.swing.text.html.HTMLDocument.FindById ++javax.swing.text.html.parser.Entity.Entity_Test ++javax.swing.text.html.parser.TagElement.TagElement_Test ++javax.swing.text.html.parser.AttributeList.AttributeList_test ++javax.swing.text.html.parser.Element.Element_Test ++javax.swing.text.html.parser.ParserDelegator.Entities ++javax.swing.text.html.parser.ParserDelegator.Text4 ++javax.swing.text.html.parser.ParserDelegator.randomTables ++javax.swing.text.html.parser.ParserDelegator.SimpleParsing ++javax.swing.text.html.parser.ParserDelegator.Text3 ++javax.swing.text.html.parser.ParserDelegator.Text ++javax.swing.text.html.parser.ParserDelegator.eolnNorification ++javax.swing.text.html.parser.ParserDelegator.tokenLocations ++javax.swing.text.html.parser.DTD.DTD_test ++javax.swing.text.BoxView.spans ++javax.swing.text.StyleConstants.setTabSet ++javax.swing.text.StyleConstants.setUnderline ++javax.swing.text.StyleConstants.getLeftIndent ++javax.swing.text.StyleConstants.getIcon ++javax.swing.text.StyleConstants.setFontFamily ++javax.swing.text.StyleConstants.isBold ++javax.swing.text.StyleConstants.setLineSpacing ++javax.swing.text.StyleConstants.isSubscript ++javax.swing.text.StyleConstants.setItalic ++javax.swing.text.StyleConstants.getFontSize ++javax.swing.text.StyleConstants.setComponent ++javax.swing.text.StyleConstants.setBidiLevel ++javax.swing.text.StyleConstants.getForeground ++javax.swing.text.StyleConstants.setSuperscript ++javax.swing.text.StyleConstants.getSpaceAbove ++javax.swing.text.StyleConstants.getLineSpacing ++javax.swing.text.StyleConstants.setSpaceBelow ++javax.swing.text.StyleConstants.setBackground ++javax.swing.text.StyleConstants.getRightIndent ++javax.swing.text.StyleConstants.getFirstLineIndent ++javax.swing.text.StyleConstants.isItalic ++javax.swing.text.StyleConstants.getTabSet ++javax.swing.text.StyleConstants.setIcon ++javax.swing.text.StyleConstants.isUnderline ++javax.swing.text.StyleConstants.setBold ++javax.swing.text.StyleConstants.setAlignment ++javax.swing.text.StyleConstants.setSubscript ++javax.swing.text.StyleConstants.setRightIndent ++javax.swing.text.StyleConstants.isSuperscript ++javax.swing.text.StyleConstants.getSpaceBelow ++javax.swing.text.StyleConstants.setFontSize ++javax.swing.text.StyleConstants.getFontFamily ++javax.swing.text.StyleConstants.constants ++javax.swing.text.StyleConstants.setStrikeThrough ++javax.swing.text.StyleConstants.setForeground ++javax.swing.text.StyleConstants.getAlignment ++javax.swing.text.StyleConstants.setLeftIndent ++javax.swing.text.StyleConstants.getBackground ++javax.swing.text.StyleConstants.getComponent ++javax.swing.text.StyleConstants.getBidiLevel ++javax.swing.text.StyleConstants.setFirstLineIndent ++javax.swing.text.StyleConstants.setSpaceAbove ++javax.swing.text.StyleConstants.isStrikeThrough ++javax.swing.text.DefaultStyledDocument.ElementBuffer.ElementStructure3 ++javax.swing.text.DefaultStyledDocument.ElementBuffer.ElementStructure1 ++javax.swing.text.DefaultStyledDocument.ElementBuffer.StyledDocument6 ++javax.swing.text.DefaultStyledDocument.ElementBuffer.ElementStructure7 ++javax.swing.text.DefaultStyledDocument.ElementBuffer.ElementStructure2 ++javax.swing.text.DefaultStyledDocument.ElementBuffer.StyledDocument1 ++javax.swing.text.DefaultStyledDocument.ElementBuffer.ElementStructure4 ++javax.swing.text.DefaultStyledDocument.ElementBuffer.ElementStructure6 
++javax.swing.text.DefaultStyledDocument.ElementBuffer.StyledDocument3 ++javax.swing.text.DefaultStyledDocument.ElementBuffer.ElementStructure5 ++javax.swing.text.DefaultStyledDocument.ElementBuffer.insert ++javax.swing.text.DefaultStyledDocument.ElementBuffer.ElementStructure8 ++javax.swing.text.DefaultStyledDocument.ElementBuffer.StyledDocument2 ++javax.swing.text.DefaultStyledDocument.ElementBuffer.StyledDocument5 ++javax.swing.text.DefaultStyledDocument.ElementBuffer.StyledDocument4 ++javax.swing.text.DefaultStyledDocument.Insert ++javax.swing.text.DefaultStyledDocument.insertString ++javax.swing.text.DefaultStyledDocument.Create ++javax.swing.text.SimpleAttributeSet.removeAttributesOnlyIfMatch ++javax.swing.text.SimpleAttributeSet.clone ++javax.swing.text.SimpleAttributeSet.EMPTY ++javax.swing.text.SimpleAttributeSet.containsAttributes ++javax.swing.text.SimpleAttributeSet.containsChecksParent ++javax.swing.text.SimpleAttributeSet.containsAttribute ++javax.swing.text.SimpleAttributeSet.getResolveParent ++javax.swing.text.SimpleAttributeSet.removeAttribute ++javax.swing.text.SimpleAttributeSet.copyAttributes ++javax.swing.text.SimpleAttributeSet.equals ++javax.swing.text.SimpleAttributeSet.removeAttributes ++javax.swing.text.SimpleAttributeSet.getAttribute ++javax.swing.text.SimpleAttributeSet.isEqual ++javax.swing.text.SimpleAttributeSet.getAttributeCount ++javax.swing.text.SimpleAttributeSet.constructors ++javax.swing.text.SimpleAttributeSet.isDefined ++javax.swing.text.SimpleAttributeSet.addAttribute ++javax.swing.text.SimpleAttributeSet.getAttributeNames ++javax.swing.text.SimpleAttributeSet.setResolveParent ++javax.swing.text.SimpleAttributeSet.isEmpty ++javax.swing.text.SimpleAttributeSet.addAttributes ++javax.swing.text.SimpleAttributeSet.nullValue ++javax.swing.text.ZoneView.getMaximumZoneSize ++javax.swing.text.ZoneView.createZone ++javax.swing.text.ZoneView.constructor ++javax.swing.text.ZoneView.zoneWasLoaded ++javax.swing.text.ZoneView.isZoneLoaded ++javax.swing.text.ZoneView.loadChildren ++javax.swing.text.ZoneView.setMaxZonesLoaded ++javax.swing.text.ZoneView.unloadZone ++javax.swing.text.ZoneView.setMaximumZoneSize ++javax.swing.text.FlowView.FlowStrategy.adjustRow ++javax.swing.text.FlowView.getFlowAxis ++javax.swing.text.InternationalFormatter.InternationalFormatterTest ++javax.swing.text.Utilities.getNextWord ++javax.swing.text.Utilities.getBreakLocation ++javax.swing.text.Utilities.getPreviousWord ++javax.swing.text.Utilities.getWordStart ++javax.swing.text.Utilities.getTabbedTextOffset ++javax.swing.text.TabStop.constructors ++javax.swing.text.TabStop.toString ++javax.swing.text.StringContent.insertUndo ++javax.swing.text.StringContent.BadLocationExceptionTest ++javax.swing.text.StringContent.stickyPosition ++javax.swing.text.StringContent.length ++javax.swing.text.StringContent.removeUndo ++javax.swing.text.StringContent.constructors ++javax.swing.text.StringContent.insertString ++javax.swing.text.StringContent.StringContentTest ++javax.swing.text.StyledEditorKit.createInputAttributesTest ++javax.swing.text.StyleContext.NamedStyleInit ++javax.swing.text.StyleContext.NamedStyleSetResolveParent ++javax.swing.text.StyleContext.addStyle ++javax.swing.text.StyleContext.addAttribute ++javax.swing.JScrollBar.getAccessibleContext ++javax.swing.JScrollBar.getInputMap ++javax.swing.JScrollBar.constructors ++javax.swing.JScrollBar.getActionMap ++javax.swing.JScrollBar.paramString ++javax.swing.plaf.basic.BasicButtonUI.getTextShiftOffset 
++javax.swing.plaf.basic.BasicButtonUI.getPropertyPrefix ++javax.swing.plaf.basic.BasicButtonUI.defaultTextIconGap ++javax.swing.plaf.basic.BasicButtonUI.setTextShiftOffset ++javax.swing.plaf.basic.BasicButtonUI.defaultTextShiftOffset ++javax.swing.plaf.basic.BasicButtonUI.clearTextShiftOffset ++javax.swing.plaf.basic.BasicRadioButtonMenuItemUI.getPropertyPrefix ++javax.swing.plaf.basic.BasicToggleButtonUI.getPropertyPrefix ++javax.swing.plaf.basic.BasicSeparatorUI.getPreferredSize ++javax.swing.plaf.basic.BasicSeparatorUI.getMinimumSize ++javax.swing.plaf.basic.BasicSeparatorUI.getMaximumSize ++javax.swing.plaf.basic.BasicLookAndFeel.initSystemColorDefaults ++javax.swing.plaf.basic.BasicCheckBoxMenuItemUI.getPropertyPrefix ++javax.swing.plaf.basic.BasicHTML.isHTMLString ++javax.swing.plaf.basic.BasicScrollBarUI.layoutContainer ++javax.swing.plaf.basic.BasicScrollBarUI.getPreferredSize ++javax.swing.plaf.basic.BasicScrollBarUI.constructor ++javax.swing.plaf.basic.BasicScrollBarUI.getMinimumSize ++javax.swing.plaf.basic.BasicScrollBarUI.createDecreaseButton ++javax.swing.plaf.basic.BasicScrollBarUI.getMaximumSize ++javax.swing.plaf.basic.BasicScrollBarUI.getMaximumThumbSize ++javax.swing.plaf.basic.BasicScrollBarUI.installDefaults ++javax.swing.plaf.basic.BasicScrollBarUI.createIncreaseButton ++javax.swing.plaf.basic.BasicScrollBarUI.installComponents ++javax.swing.plaf.basic.BasicScrollBarUI.getMinimumThumbSize ++javax.swing.plaf.basic.BasicInternalFrameTitlePane.MaximizeAction.constructor ++javax.swing.plaf.basic.BasicInternalFrameTitlePane.IconifyAction.constructor ++javax.swing.plaf.basic.BasicInternalFrameTitlePane.CloseAction.constructor ++javax.swing.plaf.basic.BasicInternalFrameTitlePane.RestoreAction.constructor ++javax.swing.plaf.basic.BasicInternalFrameTitlePane.MoveAction.constructor ++javax.swing.plaf.basic.BasicInternalFrameTitlePane.SizeAction.constructor ++javax.swing.plaf.basic.BasicPasswordFieldUI.getPropertyPrefix ++javax.swing.plaf.basic.BasicSplitPaneUI.BasicHorizontalLayoutManager.layoutContainer ++javax.swing.plaf.basic.BasicSplitPaneUI.getDividerLocation ++javax.swing.plaf.basic.BasicArrowButton.getPreferredSize ++javax.swing.plaf.basic.BasicArrowButton.isFocusTraversable ++javax.swing.plaf.basic.BasicArrowButton.getMinimumSize ++javax.swing.plaf.basic.BasicArrowButton.isFocusable ++javax.swing.plaf.basic.BasicArrowButton.getMaximumSize ++javax.swing.plaf.basic.BasicSliderUI.xPositionForValue ++javax.swing.plaf.basic.BasicSliderUI.getPreferredSize ++javax.swing.plaf.basic.BasicSliderUI.getMinimumSize ++javax.swing.plaf.basic.BasicSliderUI.valueForYPosition ++javax.swing.plaf.basic.BasicSliderUI.getMaximumSize ++javax.swing.plaf.basic.BasicSliderUI.getThumbSize ++javax.swing.plaf.basic.BasicSliderUI.valueForXPosition ++javax.swing.plaf.basic.BasicSliderUI.constructors ++javax.swing.plaf.basic.BasicSliderUI.yPositionForValue ++javax.swing.plaf.basic.BasicTextFieldUI.getPropertyPrefix ++javax.swing.plaf.basic.BasicTextPaneUI.getPropertyPrefix ++javax.swing.plaf.basic.BasicMenuItemUI.getPropertyPrefix ++javax.swing.plaf.basic.BasicFormattedTextFieldUI.getPropertyPrefix ++javax.swing.plaf.basic.BasicTextAreaUI.getPropertyPrefix ++javax.swing.plaf.basic.BasicComboBoxUI.getDefaultSize ++javax.swing.plaf.basic.BasicComboBoxUI.createEditor ++javax.swing.plaf.basic.BasicComboBoxUI.getMaximumSize ++javax.swing.plaf.basic.BasicListUI.updateLayoutStateNeeded ++javax.swing.plaf.basic.BasicMenuUI.getPropertyPrefix ++javax.swing.plaf.basic.BasicIconFactory.getCheckBoxMenuItemIcon 
++javax.swing.plaf.basic.BasicIconFactory.getMenuArrowIcon ++javax.swing.plaf.basic.BasicIconFactory.getCheckBoxIcon ++javax.swing.plaf.basic.BasicFileChooserUI.getFileName ++javax.swing.plaf.basic.BasicFileChooserUI.getApproveButtonText ++javax.swing.plaf.basic.BasicFileChooserUI.constructor ++javax.swing.plaf.basic.BasicFileChooserUI.getApproveButtonMnemonic ++javax.swing.plaf.basic.BasicFileChooserUI.uninstallStrings ++javax.swing.plaf.basic.BasicFileChooserUI.getApproveButton ++javax.swing.plaf.basic.BasicFileChooserUI.getGoHomeAction ++javax.swing.plaf.basic.BasicFileChooserUI.getUpdateAction ++javax.swing.plaf.basic.BasicFileChooserUI.getDirectoryName ++javax.swing.plaf.basic.BasicFileChooserUI.getApproveSelectionAction ++javax.swing.plaf.basic.BasicFileChooserUI.installStrings ++javax.swing.plaf.basic.BasicFileChooserUI.getChangeToParentDirectoryAction ++javax.swing.plaf.basic.BasicFileChooserUI.getCancelSelectionAction ++javax.swing.plaf.basic.BasicFileChooserUI.getFileView ++javax.swing.plaf.basic.BasicFileChooserUI.installIcons ++javax.swing.plaf.basic.BasicFileChooserUI.getNewFolderAction ++javax.swing.plaf.basic.BasicFileChooserUI.getApproveButtonToolTipText ++javax.swing.plaf.basic.BasicFileChooserUI.getDialogTitle ++javax.swing.plaf.basic.BasicCheckBoxUI.getPropertyPrefix ++javax.swing.plaf.basic.BasicComboBoxEditor.removeActionListener ++javax.swing.plaf.basic.BasicComboBoxEditor.addActionListener ++javax.swing.plaf.basic.BasicComboBoxEditor.constructor ++javax.swing.plaf.basic.BasicEditorPaneUI.getPropertyPrefix ++javax.swing.plaf.basic.BasicRootPaneUI.installDefaults ++javax.swing.plaf.basic.BasicRadioButtonUI.getPropertyPrefix ++javax.swing.plaf.ColorUIResource.serialization ++javax.swing.plaf.ColorUIResource.equals ++javax.swing.plaf.ColorUIResource.constructors ++javax.swing.plaf.metal.MetalTheme.MetalThemeTest ++javax.swing.plaf.metal.MetalLookAndFeel.getMenuTextFont ++javax.swing.plaf.metal.MetalLookAndFeel.getDescription ++javax.swing.plaf.metal.MetalLookAndFeel.isNativeLookAndFeel ++javax.swing.plaf.metal.MetalLookAndFeel.getSeparatorBackground ++javax.swing.plaf.metal.MetalLookAndFeel.getPrimaryControl ++javax.swing.plaf.metal.MetalLookAndFeel.getWindowTitleInactiveForeground ++javax.swing.plaf.metal.MetalLookAndFeel.getMenuBackground ++javax.swing.plaf.metal.MetalLookAndFeel.getMenuForeground ++javax.swing.plaf.metal.MetalLookAndFeel.getPrimaryControlShadow ++javax.swing.plaf.metal.MetalLookAndFeel.getWindowTitleForeground ++javax.swing.plaf.metal.MetalLookAndFeel.getControlTextFont ++javax.swing.plaf.metal.MetalLookAndFeel.getControlHighlight ++javax.swing.plaf.metal.MetalLookAndFeel.getPrimaryControlHighlight ++javax.swing.plaf.metal.MetalLookAndFeel.getControlTextColor ++javax.swing.plaf.metal.MetalLookAndFeel.getPrimaryControlDarkShadow ++javax.swing.plaf.metal.MetalLookAndFeel.getSeparatorForeground ++javax.swing.plaf.metal.MetalLookAndFeel.getSubTextFont ++javax.swing.plaf.metal.MetalLookAndFeel.setCurrentTheme ++javax.swing.plaf.metal.MetalLookAndFeel.getControlShadow ++javax.swing.plaf.metal.MetalLookAndFeel.isSupportedLookAndFeel ++javax.swing.plaf.metal.MetalLookAndFeel.getMenuSelectedBackground ++javax.swing.plaf.metal.MetalLookAndFeel.getInactiveControlTextColor ++javax.swing.plaf.metal.MetalLookAndFeel.getWhite ++javax.swing.plaf.metal.MetalLookAndFeel.getUserTextFont ++javax.swing.plaf.metal.MetalLookAndFeel.getAcceleratorForeground ++javax.swing.plaf.metal.MetalLookAndFeel.getControlDisabled ++javax.swing.plaf.metal.MetalLookAndFeel.getID 
++javax.swing.plaf.metal.MetalLookAndFeel.getWindowTitleBackground ++javax.swing.plaf.metal.MetalLookAndFeel.getName ++javax.swing.plaf.metal.MetalLookAndFeel.getSystemTextColor ++javax.swing.plaf.metal.MetalLookAndFeel.getWindowTitleFont ++javax.swing.plaf.metal.MetalLookAndFeel.getInactiveSystemTextColor ++javax.swing.plaf.metal.MetalLookAndFeel.getUserTextColor ++javax.swing.plaf.metal.MetalLookAndFeel.getWindowBackground ++javax.swing.plaf.metal.MetalLookAndFeel.getBlack ++javax.swing.plaf.metal.MetalLookAndFeel.getAcceleratorSelectedForeground ++javax.swing.plaf.metal.MetalLookAndFeel.getDesktopColor ++javax.swing.plaf.metal.MetalLookAndFeel.getSystemTextFont ++javax.swing.plaf.metal.MetalLookAndFeel.getControlDarkShadow ++javax.swing.plaf.metal.MetalLookAndFeel.getControlInfo ++javax.swing.plaf.metal.MetalLookAndFeel.getMenuDisabledForeground ++javax.swing.plaf.metal.MetalLookAndFeel.getFocusColor ++javax.swing.plaf.metal.MetalLookAndFeel.getMenuSelectedForeground ++javax.swing.plaf.metal.MetalLookAndFeel.getHighlightedTextColor ++javax.swing.plaf.metal.MetalLookAndFeel.getControl ++javax.swing.plaf.metal.MetalLookAndFeel.getWindowTitleInactiveBackground ++javax.swing.plaf.metal.MetalLookAndFeel.getTextHighlightColor ++javax.swing.plaf.metal.MetalLookAndFeel.getPrimaryControlInfo ++javax.swing.plaf.metal.MetalComboBoxIcon.getIconWidth ++javax.swing.plaf.metal.MetalComboBoxIcon.getIconHeight ++javax.swing.plaf.metal.MetalComboBoxUI.getPreferredSize ++javax.swing.plaf.metal.MetalComboBoxUI.createArrowButton ++javax.swing.plaf.metal.MetalFileChooserUI.getFileName ++javax.swing.plaf.metal.MetalFileChooserUI.createFilterComboBoxModel ++javax.swing.plaf.metal.MetalFileChooserUI.getButtonPanel ++javax.swing.plaf.metal.MetalFileChooserUI.getPreferredSize ++javax.swing.plaf.metal.MetalFileChooserUI.getMinimumSize ++javax.swing.plaf.metal.MetalFileChooserUI.getMaximumSize ++javax.swing.plaf.metal.MetalFileChooserUI.getApproveButton ++javax.swing.plaf.metal.MetalFileChooserUI.setFileName ++javax.swing.plaf.metal.MetalFileChooserUI.getBottomPanel ++javax.swing.plaf.metal.MetalCheckBoxUI.getPropertyPrefix ++javax.swing.plaf.metal.MetalIconFactory.TreeFolderIcon.getAdditionalHeight ++javax.swing.plaf.metal.MetalIconFactory.TreeFolderIcon.getShift ++javax.swing.plaf.metal.MetalIconFactory.PaletteCloseIcon.getIconWidth ++javax.swing.plaf.metal.MetalIconFactory.PaletteCloseIcon.getIconHeight ++javax.swing.plaf.metal.MetalIconFactory.TreeLeafIcon.getAdditionalHeight ++javax.swing.plaf.metal.MetalIconFactory.TreeLeafIcon.getShift ++javax.swing.plaf.metal.MetalIconFactory.getInternalFrameCloseIcon ++javax.swing.plaf.metal.MetalIconFactory.getInternalFrameMaximizeIcon ++javax.swing.plaf.metal.MetalIconFactory.getMenuItemCheckIcon ++javax.swing.plaf.metal.MetalIconFactory.getRadioButtonMenuItemIcon ++javax.swing.plaf.metal.MetalIconFactory.getCheckBoxMenuItemIcon ++javax.swing.plaf.metal.MetalIconFactory.getTreeHardDriveIcon ++javax.swing.plaf.metal.MetalIconFactory.getTreeLeafIcon ++javax.swing.plaf.metal.MetalIconFactory.getMenuArrowIcon ++javax.swing.plaf.metal.MetalIconFactory.getFileChooserNewFolderIcon ++javax.swing.plaf.metal.MetalIconFactory.getTreeControlIcon ++javax.swing.plaf.metal.MetalIconFactory.getFileChooserDetailViewIcon ++javax.swing.plaf.metal.MetalIconFactory.getTreeFloppyDriveIcon ++javax.swing.plaf.metal.MetalIconFactory.getMenuItemArrowIcon ++javax.swing.plaf.metal.MetalIconFactory.getFileChooserListViewIcon ++javax.swing.plaf.metal.MetalIconFactory.getTreeComputerIcon 
++javax.swing.plaf.metal.MetalIconFactory.getFileChooserHomeFolderIcon ++javax.swing.plaf.metal.MetalIconFactory.getInternalFrameAltMaximizeIcon ++javax.swing.plaf.metal.MetalIconFactory.getRadioButtonIcon ++javax.swing.plaf.metal.MetalIconFactory.getFileChooserUpFolderIcon ++javax.swing.plaf.metal.MetalIconFactory.getInternalFrameDefaultMenuIcon ++javax.swing.plaf.metal.MetalIconFactory.getInternalFrameMinimizeIcon ++javax.swing.plaf.metal.MetalIconFactory.getCheckBoxIcon ++javax.swing.plaf.metal.MetalIconFactory.getTreeFolderIcon ++javax.swing.plaf.metal.MetalRadioButtonUI.getSelectColor ++javax.swing.plaf.metal.MetalRadioButtonUI.getFocusColor ++javax.swing.plaf.metal.MetalRadioButtonUI.getDisabledTextColor ++javax.swing.plaf.metal.MetalComboBoxButton.setIconOnly ++javax.swing.plaf.metal.MetalComboBoxButton.isFocusTraversable ++javax.swing.plaf.metal.MetalComboBoxButton.setEnabled ++javax.swing.plaf.metal.MetalComboBoxButton.isIconOnly ++javax.swing.plaf.metal.MetalComboBoxButton.getComboBox ++javax.swing.plaf.metal.MetalComboBoxButton.getComboIcon ++javax.swing.plaf.metal.MetalComboBoxButton.setComboBox ++javax.swing.plaf.metal.MetalComboBoxButton.constructors ++javax.swing.plaf.metal.MetalComboBoxButton.setComboIcon ++javax.swing.plaf.metal.DefaultMetalTheme.getMenuTextFont ++javax.swing.plaf.metal.DefaultMetalTheme.DefaultMetalThemeTest ++javax.swing.plaf.metal.DefaultMetalTheme.getControlTextFont ++javax.swing.plaf.metal.DefaultMetalTheme.getSubTextFont ++javax.swing.plaf.metal.DefaultMetalTheme.getUserTextFont ++javax.swing.plaf.metal.DefaultMetalTheme.getName ++javax.swing.plaf.metal.DefaultMetalTheme.getWindowTitleFont ++javax.swing.plaf.metal.DefaultMetalTheme.getSystemTextFont ++javax.swing.plaf.metal.MetalComboBoxEditor.constructor ++javax.swing.plaf.metal.MetalComboBoxEditor.editorBorderInsets ++javax.swing.plaf.metal.MetalToolTipUI.constructor ++javax.swing.plaf.metal.MetalToolTipUI.createUI ++javax.swing.plaf.metal.OceanTheme.OceanThemeTest ++javax.swing.plaf.metal.OceanTheme.addCustomEntriesToTable ++javax.swing.plaf.metal.MetalToggleButtonUI.getSelectColor ++javax.swing.plaf.metal.MetalToggleButtonUI.getFocusColor ++javax.swing.plaf.metal.MetalToggleButtonUI.getDisabledTextColor ++javax.swing.plaf.metal.MetalBorders.ToolBarBorder.getBorderInsets ++javax.swing.plaf.metal.MetalBorders.MenuItemBorder.borderInsets ++javax.swing.plaf.metal.MetalBorders.MenuBarBorder.getBorderInsets ++javax.swing.plaf.metal.MetalBorders.MenuBarBorder.borderInsets ++javax.swing.plaf.metal.MetalBorders.getToggleButtonBorder ++javax.swing.plaf.metal.MetalBorders.getDesktopIconBorder ++javax.swing.plaf.metal.MetalBorders.getButtonBorder ++javax.swing.plaf.metal.MetalBorders.getTextFieldBorder ++javax.swing.plaf.metal.MetalBorders.getTextBorder ++javax.swing.plaf.metal.MetalScrollButton.getButtonWidth ++javax.swing.plaf.metal.MetalScrollButton.getPreferredSize ++javax.swing.plaf.metal.MetalScrollButton.constructor ++javax.swing.plaf.metal.MetalScrollButton.getMinimumSize ++javax.swing.plaf.metal.MetalScrollButton.isFocusable ++javax.swing.plaf.metal.MetalScrollButton.getMaximumSize ++javax.swing.plaf.metal.MetalScrollBarUI.layoutContainer ++javax.swing.plaf.metal.MetalScrollBarUI.getPreferredSize ++javax.swing.plaf.metal.MetalScrollBarUI.constructor ++javax.swing.plaf.metal.MetalScrollBarUI.installDefaults ++javax.swing.plaf.metal.MetalScrollBarUI.getMinimumThumbSize ++javax.swing.plaf.metal.MetalButtonUI.getSelectColor ++javax.swing.plaf.metal.MetalButtonUI.getFocusColor 
++javax.swing.plaf.metal.MetalButtonUI.getDisabledTextColor ++javax.swing.plaf.IconUIResource.constructor ++javax.swing.JFrame.isRootPaneCheckingEnabled ++javax.swing.JFrame.glassPaneLayout ++javax.swing.JFrame.constructors ++javax.swing.border.TitledBorder.getTitleFont ++javax.swing.border.TitledBorder.getTitle ++javax.swing.border.TitledBorder.setTitleJustification ++javax.swing.border.TitledBorder.getBorder ++javax.swing.border.TitledBorder.getBorderInsets ++javax.swing.border.TitledBorder.setTitleColor ++javax.swing.border.TitledBorder.setTitle ++javax.swing.border.TitledBorder.setTitlePosition ++javax.swing.border.TitledBorder.getTitleJustification ++javax.swing.border.TitledBorder.setBorder ++javax.swing.border.TitledBorder.getTitleColor ++javax.swing.border.TitledBorder.isBorderOpaque ++javax.swing.border.TitledBorder.constructors ++javax.swing.border.TitledBorder.setTitleFont ++javax.swing.border.TitledBorder.getTitlePosition ++javax.swing.border.CompoundBorder.isBorderOpaque ++javax.swing.JTable.AccessibleJTable.AccessibleJTableHeaderCell.getAccessibleRole ++javax.swing.JTable.AccessibleJTable.AccessibleJTableCell.getAccessibleRole ++javax.swing.JTable.AccessibleJTable.getAccessibleColumnHeader ++javax.swing.JTable.setRowSelectionAllowed ++javax.swing.JTable.getColumn ++javax.swing.JTable.getRowHeight ++javax.swing.JTable.convertColumnIndexToModel ++javax.swing.JTable.createDefaultDataModel ++javax.swing.JTable.getCellEditor ++javax.swing.JTable.setColumnSelectionAllowed ++javax.swing.JTable.setAutoCreateColumnsFromModel ++javax.swing.JTable.setRowHeight ++javax.swing.JTable.createDefaultSelectionModel ++javax.swing.JTable.getAccessibleContext ++javax.swing.JTable.convertColumnIndexToView ++javax.swing.JTable.isCellEditable ++javax.swing.JTable.getInputMap ++javax.swing.JTable.getCellRect ++javax.swing.JTable.isRowSelected ++javax.swing.JTable.getModel ++javax.swing.JTable.initializeLocalVars ++javax.swing.JTable.createDefaultColumnsFromModel ++javax.swing.JTable.getAutoCreateColumnsFromModel ++javax.swing.JTable.isColumnSelected ++javax.swing.JTable.addColumn ++javax.swing.table.AbstractTableModel.getColumnName ++javax.swing.table.AbstractTableModel.fireTableDataChanged ++javax.swing.table.AbstractTableModel.fireTableStructureChanged ++javax.swing.table.AbstractTableModel.fireTableRowsUpdated ++javax.swing.table.AbstractTableModel.fireTableRowsDeleted ++javax.swing.table.AbstractTableModel.getColumnClass ++javax.swing.table.AbstractTableModel.isCellEditable ++javax.swing.table.AbstractTableModel.fireTableChanged ++javax.swing.table.AbstractTableModel.findColumn ++javax.swing.table.AbstractTableModel.fireTableRowsInserted ++javax.swing.table.AbstractTableModel.fireTableCellUpdated ++javax.swing.table.DefaultTableCellRenderer.getTableCellRendererComponent ++javax.swing.table.DefaultTableColumnModel.getListeners ++javax.swing.table.DefaultTableColumnModel.getColumn ++javax.swing.table.DefaultTableColumnModel.getColumnModelListeners ++javax.swing.table.DefaultTableColumnModel.getSelectedColumnCount ++javax.swing.table.DefaultTableColumnModel.getSelectionModel ++javax.swing.table.DefaultTableColumnModel.getTotalColumnWidth ++javax.swing.table.DefaultTableColumnModel.constructor ++javax.swing.table.DefaultTableColumnModel.getColumns ++javax.swing.table.DefaultTableColumnModel.setColumnSelectionAllowed ++javax.swing.table.DefaultTableColumnModel.getColumnSelectionAllowed ++javax.swing.table.DefaultTableColumnModel.getColumnCount 
++javax.swing.table.DefaultTableColumnModel.setSelectionModel ++javax.swing.table.DefaultTableColumnModel.setColumnMargin ++javax.swing.table.DefaultTableColumnModel.getColumnIndexAtX ++javax.swing.table.DefaultTableColumnModel.addColumnModelListener ++javax.swing.table.DefaultTableColumnModel.removeColumn ++javax.swing.table.DefaultTableColumnModel.moveColumn ++javax.swing.table.DefaultTableColumnModel.addColumn ++javax.swing.table.DefaultTableColumnModel.getColumnIndex ++javax.swing.table.DefaultTableColumnModel.getColumnMargin ++javax.swing.table.DefaultTableModel.removeRow ++javax.swing.table.DefaultTableModel.getRowCount ++javax.swing.table.DefaultTableModel.setColumnCount ++javax.swing.table.DefaultTableModel.insertRow ++javax.swing.table.DefaultTableModel.getColumnCount ++javax.swing.table.DefaultTableModel.convertToVector ++javax.swing.table.DefaultTableModel.setColumnIdentifiers ++javax.swing.table.DefaultTableModel.isCellEditable ++javax.swing.table.DefaultTableModel.setDataVector ++javax.swing.table.DefaultTableModel.setValueAt ++javax.swing.table.DefaultTableModel.moveRow ++javax.swing.table.DefaultTableModel.newDataAvailable ++javax.swing.table.DefaultTableModel.addRow ++javax.swing.table.DefaultTableModel.constructors ++javax.swing.table.DefaultTableModel.getDataVector ++javax.swing.table.DefaultTableModel.rowsRemoved ++javax.swing.table.DefaultTableModel.addColumn ++javax.swing.table.DefaultTableModel.setRowCount ++javax.swing.table.DefaultTableModel.getValueAt ++javax.swing.table.TableColumn.setCellEditor ++javax.swing.table.TableColumn.getHeaderRenderer ++javax.swing.table.TableColumn.getIdentifier ++javax.swing.table.TableColumn.setPreferredWidth ++javax.swing.table.TableColumn.setHeaderValue ++javax.swing.table.TableColumn.getCellRenderer ++javax.swing.table.TableColumn.sizeWidthToFit ++javax.swing.table.TableColumn.setResizable ++javax.swing.table.TableColumn.properties ++javax.swing.table.TableColumn.removePropertyChangeListener ++javax.swing.table.TableColumn.setIdentifier ++javax.swing.table.TableColumn.addPropertyChangeListener ++javax.swing.table.TableColumn.setMinWidth ++javax.swing.table.TableColumn.setModelIndex ++javax.swing.table.TableColumn.setCellRenderer ++javax.swing.table.TableColumn.getModelIndex ++javax.swing.table.TableColumn.setMaxWidth ++javax.swing.table.TableColumn.setWidth ++javax.swing.table.TableColumn.constants ++javax.swing.table.TableColumn.setHeaderRenderer ++javax.swing.table.TableColumn.getHeaderValue ++javax.swing.table.JTableHeader.AccessibleJTableHeader.AccessibleJTableHeaderEntry.getLocale ++javax.swing.table.JTableHeader.AccessibleJTableHeader.AccessibleJTableHeaderEntry.isFocusTraversable ++javax.swing.table.JTableHeader.AccessibleJTableHeader.AccessibleJTableHeaderEntry.setAccessibleName ++javax.swing.table.JTableHeader.AccessibleJTableHeader.AccessibleJTableHeaderEntry.getAccessibleName ++javax.swing.table.JTableHeader.AccessibleJTableHeader.AccessibleJTableHeaderEntry.getAccessibleDescription ++javax.swing.table.JTableHeader.AccessibleJTableHeader.AccessibleJTableHeaderEntry.getAccessibleIndexInParent ++javax.swing.table.JTableHeader.AccessibleJTableHeader.AccessibleJTableHeaderEntry.getAccessibleContext ++javax.swing.table.JTableHeader.AccessibleJTableHeader.AccessibleJTableHeaderEntry.isVisible ++javax.swing.table.JTableHeader.AccessibleJTableHeader.AccessibleJTableHeaderEntry.getAccessibleChild ++javax.swing.table.JTableHeader.AccessibleJTableHeader.AccessibleJTableHeaderEntry.getAccessibleRole 
++javax.swing.table.JTableHeader.AccessibleJTableHeader.AccessibleJTableHeaderEntry.getAccessibleComponent ++javax.swing.table.JTableHeader.AccessibleJTableHeader.AccessibleJTableHeaderEntry.getAccessibleChildrenCount ++javax.swing.table.JTableHeader.AccessibleJTableHeader.AccessibleJTableHeaderEntry.getAccessibleValue ++javax.swing.table.JTableHeader.AccessibleJTableHeader.getAccessibleChild ++javax.swing.table.JTableHeader.AccessibleJTableHeader.getAccessibleRole ++javax.swing.table.JTableHeader.AccessibleJTableHeader.getAccessibleChildrenCount ++javax.swing.table.JTableHeader.getAccessibleContext ++javax.swing.event.InternalFrameEvent.constructor ++javax.swing.event.InternalFrameEvent.getInternalFrame ++javax.swing.event.InternalFrameEvent.paramString ++javax.swing.event.ListDataEvent.getIndex0 ++javax.swing.event.ListDataEvent.constructor ++javax.swing.event.ListDataEvent.getType ++javax.swing.event.ListDataEvent.getSource ++javax.swing.event.ListDataEvent.getIndex1 ++javax.swing.event.ListDataEvent.toString ++javax.swing.event.TableModelEvent.constructors ++javax.swing.event.SwingPropertyChangeSupport.firePropertyChange ++javax.swing.event.SwingPropertyChangeSupport.getPropertyChangeListeners ++javax.swing.event.SwingPropertyChangeSupport.constructor ++javax.swing.event.SwingPropertyChangeSupport.hasListeners ++javax.swing.event.SwingPropertyChangeSupport.removePropertyChangeListener ++javax.swing.event.SwingPropertyChangeSupport.addPropertyChangeListener ++javax.swing.event.EventListenerList.getListeners ++javax.swing.event.EventListenerList.add ++javax.swing.event.EventListenerList.getListenerList ++javax.swing.event.EventListenerList.getListenerCount ++javax.swing.event.EventListenerList.toString ++javax.swing.event.EventListenerList.remove ++javax.swing.event.TreeSelectionEvent.isAddedPath ++javax.swing.event.TreeSelectionEvent.cloneWithSource ++javax.swing.event.TreeSelectionEvent.constructors ++javax.swing.JDesktopPane.constructor ++javax.swing.JRadioButton.isFocusable ++javax.swing.JRadioButton.uidelegate ++javax.swing.JRadioButton.getActionCommand ++javax.swing.JRadioButton.model ++javax.swing.JMenuBar.getComponentIndex ++javax.swing.JMenuBar.getHelpMenu ++javax.swing.JMenuBar.getSubElements ++javax.swing.JMenuBar.setMargin ++javax.swing.JMenuBar.getInputMap ++javax.swing.JMenuBar.setHelpMenu ++javax.swing.JMenuBar.getActionMap ++javax.swing.JMenuBar.getMenu ++javax.swing.JCheckBoxMenuItem.uidelegate ++javax.swing.JCheckBoxMenuItem.getActionCommand ++javax.swing.JCheckBoxMenuItem.model ++javax.swing.JCheckBoxMenuItem.constructors ++javax.swing.JComboBox.getPrototypeDisplayValue ++javax.swing.JComboBox.MutableTest2 ++javax.swing.JComboBox.setEditor ++javax.swing.JComboBox.getEditor ++javax.swing.JComboBox.addItem ++javax.swing.JComboBox.MutableTest1 ++javax.swing.JComboBox.removeItem ++javax.swing.JComboBox.setModel ++javax.swing.JComboBox.setSelectedIndex ++javax.swing.JComboBox.model ++javax.swing.JComboBox.listenerList ++javax.swing.JComboBox.setPrototypeDisplayValue ++javax.swing.JComboBox.setEditable ++javax.swing.JComboBox.SimpleSelectionTest ++javax.swing.OverlayLayout.layoutContainer ++javax.swing.OverlayLayout.getLayoutAlignmentY ++javax.swing.OverlayLayout.getLayoutAlignmentX ++javax.swing.OverlayLayout.preferredLayoutSize ++javax.swing.OverlayLayout.minimumLayoutSize ++javax.swing.OverlayLayout.maximumLayoutSize ++javax.swing.JScrollPane.AccessibleJScrollPane.resetViewport ++javax.swing.JScrollPane.createHorizontalScrollBar ++javax.swing.JScrollPane.getInputMap 
++javax.swing.JScrollPane.createVerticalScrollBar ++javax.swing.JScrollPane.getActionMap ++javax.swing.DefaultListCellRenderer.getListCellRendererComponent ++javax.swing.JFileChooser.setFileHidingEnabled ++javax.swing.JFileChooser.getControlButtonsAreShown ++javax.swing.JFileChooser.setApproveButtonMnemonic ++javax.swing.JFileChooser.setFileSystemView ++javax.swing.JFileChooser.getApproveButtonText ++javax.swing.JFileChooser.setDialogTitle ++javax.swing.JFileChooser.setDialogType ++javax.swing.JFileChooser.setSelectedFile ++javax.swing.JFileChooser.getChoosableFileFilters ++javax.swing.JFileChooser.getApproveButtonMnemonic ++javax.swing.JFileChooser.setApproveButtonText ++javax.swing.JFileChooser.getDialogType ++javax.swing.JFileChooser.getFileFilter ++javax.swing.JFileChooser.setAccessory ++javax.swing.JFileChooser.isAcceptAllFileFilterUsed ++javax.swing.JFileChooser.accept ++javax.swing.JFileChooser.setControlButtonsAreShown ++javax.swing.JFileChooser.setFileView ++javax.swing.JFileChooser.setCurrentDirectory ++javax.swing.JFileChooser.isFileHidingEnabled ++javax.swing.JFileChooser.constructors ++javax.swing.JFileChooser.changeToParentDirectory ++javax.swing.JFileChooser.getFileView ++javax.swing.JFileChooser.getFileSystemView ++javax.swing.JFileChooser.setSelectedFiles ++javax.swing.JFileChooser.setFileFilter ++javax.swing.JFileChooser.getFileSelectionMode ++javax.swing.JFileChooser.setFileSelectionMode ++javax.swing.JFileChooser.getSelectedFiles ++javax.swing.JFileChooser.getAccessory ++javax.swing.JFileChooser.setApproveButtonToolTipText ++javax.swing.JFileChooser.getApproveButtonToolTipText ++javax.swing.JFileChooser.getDialogTitle ++javax.swing.SpinnerDateModel.getPreviousValue ++javax.swing.SpinnerDateModel.setStart ++javax.swing.SpinnerDateModel.setEnd ++javax.swing.SpinnerDateModel.setValue ++javax.swing.SpinnerDateModel.getNextValue ++javax.swing.SpinnerDateModel.constructors ++javax.swing.JDialog.isRootPaneCheckingEnabled ++javax.swing.JPopupMenu.getInputMap ++javax.swing.JSlider.getPaintTrack ++javax.swing.JSlider.addChangeListener ++javax.swing.JSlider.setMinimum ++javax.swing.JSlider.setSnapToTicks ++javax.swing.JSlider.setOrientation ++javax.swing.JSlider.getMinorTickSpacing ++javax.swing.JSlider.setValue ++javax.swing.JSlider.setMaximum ++javax.swing.JSlider.setExtent ++javax.swing.JSlider.setPaintLabels ++javax.swing.JSlider.getSnapToTicks ++javax.swing.JSlider.setMinorTickSpacing ++javax.swing.JSlider.getUIClassID ++javax.swing.JSlider.setLabelTable ++javax.swing.JSlider.getPaintTicks ++javax.swing.JSlider.setModel ++javax.swing.JSlider.getMaximum ++javax.swing.JSlider.setPaintTicks ++javax.swing.JSlider.getPaintLabels ++javax.swing.JSlider.getMinimum ++javax.swing.JSlider.getModel ++javax.swing.JSlider.getInverted ++javax.swing.JSlider.setInverted ++javax.swing.JSlider.constructors ++javax.swing.JSlider.getMajorTickSpacing ++javax.swing.JSlider.createStandardLabels ++javax.swing.JSlider.setMajorTickSpacing ++javax.swing.JSlider.getExtent ++javax.swing.JSlider.setPaintTrack ++javax.swing.JSlider.getLabelTable ++javax.swing.DefaultButtonModel.addActionListener ++javax.swing.DefaultButtonModel.setPressed ++javax.swing.DefaultButtonModel.setEnabled ++javax.swing.DefaultButtonModel.constructor ++javax.swing.DefaultButtonModel.setActionCommand ++javax.swing.DefaultButtonModel.getSelectedObjects ++javax.swing.DefaultButtonModel.setArmed ++javax.swing.DefaultButtonModel.setSelected ++javax.swing.DefaultButtonModel.constants ++javax.swing.DefaultButtonModel.setRollover 
++javax.swing.DefaultButtonModel.setGroup ++javax.swing.JComponent.getListeners ++javax.swing.JComponent.setVerifyInputWhenFocusTarget ++javax.swing.JComponent.getVetoableChangeListeners ++javax.swing.JComponent.setInheritsPopupMenu ++javax.swing.JComponent.getPreferredSize ++javax.swing.JComponent.setEnabled ++javax.swing.JComponent.constructor ++javax.swing.JComponent.registerKeyboardAction ++javax.swing.JComponent.setUI ++javax.swing.JComponent.getActionForKeyStroke ++javax.swing.JComponent.setMinimumSize ++javax.swing.JComponent.setPreferredSize ++javax.swing.JComponent.setDefaultLocale ++javax.swing.JComponent.setBackground ++javax.swing.JComponent.setBorder ++javax.swing.JComponent.getInputMap ++javax.swing.JComponent.setVisible ++javax.swing.JComponent.setAlignmentX ++javax.swing.JComponent.setComponentPopupMenu ++javax.swing.JComponent.getFont ++javax.swing.JComponent.setAlignmentY ++javax.swing.JComponent.setMaximumSize ++javax.swing.JComponent.setFont ++javax.swing.JComponent.getVerifyInputWhenFocusTarget ++javax.swing.JComponent.putClientProperty ++javax.swing.JComponent.getAlignmentY ++javax.swing.JComponent.getComponentPopupMenu ++javax.swing.JComponent.getAlignmentX ++javax.swing.JComponent.removeVetoableChangeListener ++javax.swing.JComponent.getRegisteredKeyStrokes ++javax.swing.JComponent.setForeground ++javax.swing.JComponent.paint ++javax.swing.JComponent.setOpaque ++javax.swing.JComponent.addVetoableChangeListener ++javax.swing.JProgressBar.isStringPainted ++javax.swing.JProgressBar.setOrientation ++javax.swing.JProgressBar.setValue ++javax.swing.JProgressBar.setBorderPainted ++javax.swing.JProgressBar.setStringPainted ++javax.swing.JProgressBar.getAccessibleContext ++javax.swing.JProgressBar.getPercentComplete ++javax.swing.JProgressBar.paramString ++javax.swing.JMenu.uidelegate ++javax.swing.JMenu.getInputMap ++javax.swing.JMenu.getActionCommand ++javax.swing.JMenu.model ++javax.swing.JMenu.getPopUpMenu ++javax.swing.JMenu.remove ++javax.swing.UIManager.LookAndFeelInfo.constructor ++javax.swing.UIManager.getPropertyChangeListeners ++javax.swing.UIManager.getBoolean ++javax.swing.UIManager.addAuxiliaryLookAndFeel ++javax.swing.UIManager.getString ++javax.swing.UIManager.getBorder ++javax.swing.UIManager.getIcon ++javax.swing.UIManager.getInsets ++javax.swing.UIManager.removeAuxiliaryLookAndFeel ++javax.swing.UIManager.setLookAndFeel ++javax.swing.UIManager.getFont ++javax.swing.UIManager.getDefaults ++javax.swing.UIManager.getInt ++javax.swing.UIManager.addPropertyChangeListener ++javax.swing.UIManager.getUI ++javax.swing.UIManager.getDimension ++javax.swing.UIManager.getLookAndFeelDefaults ++javax.swing.UIManager.getCrossPlatformLookAndFeelClassName ++javax.swing.UIManager.getColor ++javax.swing.UIManager.getAuxiliaryLookAndFeels ++javax.swing.JPanel.setBorder ++javax.swing.JPanel.Layouter ++javax.swing.JTree.getCellRenderer ++javax.swing.JTree.isRowSelected ++javax.swing.JTree.setModel ++javax.swing.TransferHandler.exportToClipboard ++javax.swing.TransferHandler.TransferActionConstructor ++javax.swing.TransferHandler.canImport ++javax.swing.TransferHandler.importData ++javax.swing.TransferHandler.createTransferable ++javax.swing.JToolTip.setComponent ++javax.swing.JToolTip.setTipText ++javax.swing.JToolTip.getAccessibleContext ++javax.swing.JToolTip.getUIClassID ++javax.swing.JToolTip.getTipText ++javax.swing.JToolTip.paramString ++javax.swing.JToolTip.getComponent ++javax.swing.JToolBar.buttonInsets ++javax.swing.JToolBar.getInputMap 
++javax.swing.JRootPane.RootLayout.layoutContainer ++javax.swing.JRootPane.RootLayout.getLayoutAlignmentY ++javax.swing.JRootPane.RootLayout.getLayoutAlignmentX ++javax.swing.JRootPane.RootLayout.preferredLayoutSize ++javax.swing.JRootPane.setLayeredPane ++javax.swing.ScrollPaneLayout.minimumLayoutSize ++javax.swing.Timer.preparatory ++javax.swing.Timer.setDelay ++javax.swing.Timer.basic ++javax.swing.Timer.test_23918 ++javax.swing.Timer.setInitialDelay ++javax.swing.ActionMap.newMapKeysNull ++javax.swing.InputMap.put ++javax.swing.InputMap.constructor ++javax.swing.InputMap.keys ++javax.swing.InputMap.setParent ++javax.swing.InputMap.size ++javax.swing.InputMap.newMapKeysNull ++javax.swing.InputMap.get ++javax.swing.InputMap.getParent ++javax.swing.InputMap.remove ++javax.swing.InputMap.clear ++javax.swing.InputMap.allKeys ++javax.swing.filechooser.FileView.getDescription ++javax.swing.filechooser.FileView.getIcon ++javax.swing.filechooser.FileView.getName ++javax.swing.filechooser.FileView.isTraversable ++javax.swing.filechooser.FileView.getTypeDescription ++javax.swing.filechooser.FileSystemView.getFileSystemView ++javax.swing.ViewportLayout.layoutContainer ++javax.swing.ViewportLayout.minimumLayoutSize ++javax.swing.JList.AccessibleJList.AccessibleJListChild.isFocusTraversable ++javax.swing.JList.AccessibleJList.AccessibleJListChild.isEnabled ++javax.swing.JList.AccessibleJList.AccessibleJListChild.setEnabled ++javax.swing.JList.AccessibleJList.AccessibleJListChild.setCursor ++javax.swing.JList.AccessibleJList.AccessibleJListChild.getForeground ++javax.swing.JList.AccessibleJList.AccessibleJListChild.setBackground ++javax.swing.JList.AccessibleJList.AccessibleJListChild.isVisible ++javax.swing.JList.AccessibleJList.AccessibleJListChild.isShowing ++javax.swing.JList.AccessibleJList.AccessibleJListChild.getFont ++javax.swing.JList.AccessibleJList.AccessibleJListChild.getCursor ++javax.swing.JList.AccessibleJList.AccessibleJListChild.getAccessibleRole ++javax.swing.JList.AccessibleJList.AccessibleJListChild.setFont ++javax.swing.JList.AccessibleJList.AccessibleJListChild.getAccessibleStateSet ++javax.swing.JList.AccessibleJList.AccessibleJListChild.setForeground ++javax.swing.JList.AccessibleJList.AccessibleJListChild.getBackground ++javax.swing.JList.AccessibleJList.valueChanged ++javax.swing.JList.AccessibleJList.getAccessibleChild ++javax.swing.JList.AccessibleJList.getAccessibleRole ++javax.swing.JList.AccessibleJList.intervalAdded ++javax.swing.JList.AccessibleJList.intervalRemoved ++javax.swing.JList.AccessibleJList.contentsChanged ++javax.swing.JList.AccessibleJList.getAccessibleStateSet ++javax.swing.JList.setValueIsAdjusting ++javax.swing.JList.setVisibleRowCount ++javax.swing.JList.setSelectionBackground ++javax.swing.JList.setBackground ++javax.swing.JList.getSelectionBackground ++javax.swing.JList.getInputMap ++javax.swing.JList.setModel ++javax.swing.JList.getNextMatch ++javax.swing.JList.setLayoutOrientation ++javax.swing.JList.constructors ++javax.swing.JList.getBackground ++javax.swing.ToolTipManager.setReshowDelay ++javax.swing.ToolTipManager.setInitialDelay ++javax.swing.ToolTipManager.setDismissDelay ++javax.swing.AbstractButton.constructor ++javax.swing.AbstractButton.setRolloverEnabled ++javax.swing.AbstractButton.setRolloverSelectedIcon ++javax.swing.AbstractButton.init ++javax.swing.AbstractButton.setVerticalTextPosition ++javax.swing.AbstractButton.setHorizontalAlignment ++javax.swing.AbstractButton.setHorizontalTextPosition 
++javax.swing.AbstractButton.setRolloverIcon ++javax.swing.AbstractButton.setVerticalAlignment ++javax.swing.AbstractButton.createChangeListener ++javax.swing.JLayeredPane.defaultLayoutManager ++javax.swing.JLayeredPane.getComponentsInLayer ++javax.swing.JLayeredPane.setPosition ++javax.swing.JLayeredPane.moveToFront ++javax.swing.JLayeredPane.addImpl ++javax.swing.DefaultListModel.toArray ++javax.swing.DefaultListModel.trimToSize ++javax.swing.DefaultListModel.removeAllElements ++javax.swing.DefaultListModel.constructor ++javax.swing.DefaultListModel.add ++javax.swing.DefaultListModel.lastElement ++javax.swing.DefaultListModel.copyInto ++javax.swing.DefaultListModel.firstElement ++javax.swing.DefaultListModel.setElementAt ++javax.swing.DefaultListModel.set ++javax.swing.DefaultListModel.elementAt ++javax.swing.DefaultListModel.ensureCapacity ++javax.swing.DefaultListModel.indexOf ++javax.swing.DefaultListModel.insertElementAt ++javax.swing.DefaultListModel.setSize ++javax.swing.DefaultListModel.lastIndexOf ++javax.swing.DefaultListModel.elements ++javax.swing.DefaultListModel.size ++javax.swing.DefaultListModel.getSize ++javax.swing.DefaultListModel.capacity ++javax.swing.DefaultListModel.removeElementAt ++javax.swing.DefaultListModel.get ++javax.swing.DefaultListModel.contains ++javax.swing.DefaultListModel.getElementAt ++javax.swing.DefaultListModel.removeRange ++javax.swing.DefaultListModel.addElement ++javax.swing.DefaultListModel.removeElement ++javax.swing.DefaultListModel.isEmpty ++javax.swing.DefaultListModel.remove ++javax.swing.DefaultListModel.clear ++javax.swing.JFormattedTextField.JFormattedTextFieldTests ++javax.swing.AbstractAction.clone ++javax.swing.AbstractAction.setEnabled ++javax.swing.AbstractAction.getValue ++javax.swing.AbstractAction.constructors ++javax.swing.AbstractAction.putValue ++javax.swing.JTextArea.preferredSize ++javax.swing.JTextArea.isValidChecks ++javax.swing.JTextArea.text ++javax.swing.JTextArea.gettingText ++javax.swing.JCheckBox.constructor ++javax.swing.JCheckBox.isFocusable ++javax.swing.JCheckBox.uidelegate ++javax.swing.JCheckBox.getActionCommand ++javax.swing.JCheckBox.model ++javax.swing.JButton.uidelegate ++javax.swing.JButton.getActionCommand ++javax.swing.JButton.model ++javax.swing.SizeRequirements.calculateAlignedPositions ++javax.swing.tree.TreePath.getParentPath ++javax.swing.tree.TreePath.serialization ++javax.swing.tree.TreePath.getPath ++javax.swing.tree.TreePath.getLastPathComponent ++javax.swing.tree.TreePath.PR27651 ++javax.swing.tree.TreePath.isDescendant ++javax.swing.tree.TreePath.equals ++javax.swing.tree.TreePath.getPathCount ++javax.swing.tree.TreePath.constructors ++javax.swing.tree.TreePath.getPathComponent ++javax.swing.tree.TreePath.pathByAddingChild ++javax.swing.tree.DefaultMutableTreeNode.clone ++javax.swing.tree.DefaultMutableTreeNode.getChildAt ++javax.swing.tree.DefaultMutableTreeNode.getChildBefore ++javax.swing.tree.DefaultMutableTreeNode.getSiblingCount ++javax.swing.tree.DefaultMutableTreeNode.isNodeSibling ++javax.swing.tree.DefaultMutableTreeNode.getIndex ++javax.swing.tree.DefaultMutableTreeNode.isNodeChild ++javax.swing.tree.DefaultMutableTreeNode.add ++javax.swing.tree.DefaultMutableTreeNode.getFirstChild ++javax.swing.tree.DefaultMutableTreeNode.getPath ++javax.swing.tree.DefaultMutableTreeNode.removeAllChildren ++javax.swing.tree.DefaultMutableTreeNode.getChildCount ++javax.swing.tree.DefaultMutableTreeNode.isNodeRelated ++javax.swing.tree.DefaultMutableTreeNode.getNextSibling 
++javax.swing.tree.DefaultMutableTreeNode.getPreviousSibling ++javax.swing.tree.DefaultMutableTreeNode.setAllowsChildren ++javax.swing.tree.DefaultMutableTreeNode.getPreviousLeaf ++javax.swing.tree.DefaultMutableTreeNode.isNodeAncestor ++javax.swing.tree.DefaultMutableTreeNode.getNextPreviousNode ++javax.swing.tree.DefaultMutableTreeNode.isNodeDescendant ++javax.swing.tree.DefaultMutableTreeNode.breadthFirstEnumeration ++javax.swing.tree.DefaultMutableTreeNode.getLastLeaf ++javax.swing.tree.DefaultMutableTreeNode.children ++javax.swing.tree.DefaultMutableTreeNode.getAllowsChildren ++javax.swing.tree.DefaultMutableTreeNode.getFirstLeaf ++javax.swing.tree.DefaultMutableTreeNode.constructors ++javax.swing.tree.DefaultMutableTreeNode.isLeaf ++javax.swing.tree.DefaultMutableTreeNode.insert ++javax.swing.tree.DefaultMutableTreeNode.DefaultMutableTreeNodeTest ++javax.swing.tree.DefaultMutableTreeNode.getParent ++javax.swing.tree.DefaultMutableTreeNode.getLastChild ++javax.swing.tree.DefaultMutableTreeNode.pathFromAncestorEnumeration ++javax.swing.tree.DefaultMutableTreeNode.remove ++javax.swing.tree.DefaultMutableTreeNode.getNextLeaf ++javax.swing.tree.DefaultMutableTreeNode.getChildAfter ++javax.swing.tree.DefaultTreeCellRenderer.getBackgroundSelectionColor ++javax.swing.tree.DefaultTreeCellRenderer.getOpenIcon ++javax.swing.tree.DefaultTreeCellRenderer.getClosedIcon ++javax.swing.tree.DefaultTreeCellRenderer.setOpenIcon ++javax.swing.tree.DefaultTreeCellRenderer.constructor ++javax.swing.tree.DefaultTreeCellRenderer.getLeafIcon ++javax.swing.tree.DefaultTreeCellRenderer.getBorderSelectionColor ++javax.swing.tree.DefaultTreeCellRenderer.setLeafIcon ++javax.swing.tree.DefaultTreeCellRenderer.getDefaultLeafIcon ++javax.swing.tree.DefaultTreeCellRenderer.setBackgroundSelectionColor ++javax.swing.tree.DefaultTreeCellRenderer.getDefaultOpenIcon ++javax.swing.tree.DefaultTreeCellRenderer.getDefaultClosedIcon ++javax.swing.tree.DefaultTreeCellRenderer.setBackgroundNonSelectionColor ++javax.swing.tree.DefaultTreeCellRenderer.setBorderSelectionColor ++javax.swing.tree.DefaultTreeCellRenderer.getBackgroundNonSelectionColor ++javax.swing.tree.DefaultTreeCellRenderer.setClosedIcon ++javax.swing.tree.VariableHeightLayoutCache.getBounds ++javax.swing.BoxLayout.layoutContainer ++javax.swing.BoxLayout.invalidateLayout ++javax.swing.BoxLayout.constructor ++javax.swing.BoxLayout.simplevertical ++javax.swing.BoxLayout.maximumLayoutSize2 ++javax.swing.BoxLayout.getLayoutAlignmentY ++javax.swing.BoxLayout.horizontal2 ++javax.swing.BoxLayout.getLayoutAlignmentX ++javax.swing.BoxLayout.simplehorizontal ++javax.swing.BoxLayout.constants ++javax.swing.BoxLayout.preferredLayoutSize ++javax.swing.BoxLayout.minimumLayoutSize ++javax.swing.BoxLayout.maximumLayoutSize ++javax.swing.JApplet.isRootPaneCheckingEnabled ++javax.swing.DefaultBoundedRangeModel.setValueIsAdjusting ++javax.swing.DefaultBoundedRangeModel.setMinimum ++javax.swing.DefaultBoundedRangeModel.serialization ++javax.swing.DefaultBoundedRangeModel.setValue ++javax.swing.DefaultBoundedRangeModel.setMaximum ++javax.swing.DefaultBoundedRangeModel.setExtent ++javax.swing.DefaultBoundedRangeModel.getChangeListeners ++javax.swing.DefaultBoundedRangeModel.getValue ++javax.swing.DefaultBoundedRangeModel.setRangeProperties ++javax.swing.DefaultBoundedRangeModel.getMaximum ++javax.swing.DefaultBoundedRangeModel.getValueIsAdjusting ++javax.swing.DefaultBoundedRangeModel.getMinimum ++javax.swing.DefaultBoundedRangeModel.general 
++javax.swing.DefaultBoundedRangeModel.toString ++javax.swing.DefaultBoundedRangeModel.getExtent ++javax.swing.undo.UndoableEditSupport.getUndoableEditListeners ++javax.swing.undo.UndoableEditSupport.createCompoundEdit ++javax.swing.undo.UndoableEditSupport.toString ++javax.swing.undo.UndoableEditSupport.beginUpdate ++javax.swing.undo.UndoableEditSupport.UndoableEditSupport ++javax.swing.undo.UndoableEditSupport.getUpdateLevel ++javax.swing.undo.CompoundEdit.lastEdit ++javax.swing.undo.CompoundEdit.addEdit ++javax.swing.undo.CompoundEdit.canRedo ++javax.swing.undo.CompoundEdit.canUndo ++javax.swing.undo.CompoundEdit.isInProgress ++javax.swing.undo.AbstractUndoableEdit.getRedoPresentationName ++javax.swing.undo.AbstractUndoableEdit.addEdit ++javax.swing.undo.AbstractUndoableEdit.canRedo ++javax.swing.undo.AbstractUndoableEdit.redo ++javax.swing.undo.AbstractUndoableEdit.undo ++javax.swing.undo.AbstractUndoableEdit.canUndo ++javax.swing.undo.AbstractUndoableEdit.getPresentationName ++javax.swing.undo.AbstractUndoableEdit.replaceEdit ++javax.swing.undo.AbstractUndoableEdit.isSignificant ++javax.swing.undo.AbstractUndoableEdit.getUndoPresentationName ++javax.swing.undo.StateEdit.undo ++javax.swing.undo.StateEdit.getPresentationName ++javax.swing.undo.UndoManager.getRedoPresentationName ++javax.swing.undo.UndoManager.getUndoOrRedoPresentationName ++javax.swing.undo.UndoManager.addEdit ++javax.swing.undo.UndoManager.canRedo ++javax.swing.undo.UndoManager.trimEdits ++javax.swing.undo.UndoManager.undoTo ++javax.swing.undo.UndoManager.editToBeUndone ++javax.swing.undo.UndoManager.canUndo ++javax.swing.undo.UndoManager.undoableEditHappened ++javax.swing.undo.UndoManager.setLimit ++javax.swing.undo.UndoManager.discardAllEdits ++javax.swing.undo.UndoManager.redoTo ++javax.swing.undo.UndoManager.end ++javax.swing.undo.UndoManager.toString ++javax.swing.undo.UndoManager.getUndoPresentationName ++javax.swing.undo.UndoManager.getLimit ++javax.swing.undo.UndoManager.editToBeRedone ++javax.swing.undo.UndoManager.canUndoOrRedo ++javax.swing.JMenuItem.getActionCommand ++javax.swing.JMenuItem.constructors ++javax.swing.DefaultComboBoxModel.removeAllElements ++javax.swing.DefaultComboBoxModel.insertElementAt ++javax.swing.DefaultComboBoxModel.getIndexOf ++javax.swing.DefaultComboBoxModel.getSize ++javax.swing.DefaultComboBoxModel.constructors ++javax.swing.DefaultComboBoxModel.removeElementAt ++javax.swing.DefaultComboBoxModel.getSelectedItem ++javax.swing.DefaultComboBoxModel.getElementAt ++javax.swing.DefaultComboBoxModel.addElement ++javax.swing.DefaultComboBoxModel.removeElement ++javax.swing.SizeSequence.getIndex ++javax.swing.SizeSequence.getSizes ++javax.swing.SizeSequence.removeEntries ++javax.swing.SizeSequence.setSizes ++javax.swing.SizeSequence.setSize ++javax.swing.SizeSequence.getSize ++javax.swing.SizeSequence.constructors ++javax.swing.SizeSequence.insertEntries ++javax.swing.JTextField.CopyPaste ++javax.swing.JTextField.createDefaultModel ++javax.swing.JTextField.setDocument ++javax.swing.JTextField.fireActionPerformed ++javax.swing.UIDefaults.putDefaults ++javax.swing.UIDefaults.getBoolean ++javax.swing.UIDefaults.remove ++javax.swing.RepaintManager.addDirtyRegion ++javax.swing.JInternalFrame.AccessibleJInternalFrame.getAccessibleName ++javax.swing.JInternalFrame.AccessibleJInternalFrame.getCurrentAccessibleValue ++javax.swing.JInternalFrame.AccessibleJInternalFrame.getMaximumAccessibleValue ++javax.swing.JInternalFrame.AccessibleJInternalFrame.setCurrentAccessibleValue 
++javax.swing.JInternalFrame.AccessibleJInternalFrame.getAccessibleRole ++javax.swing.JInternalFrame.AccessibleJInternalFrame.getAccessibleValue ++javax.swing.JInternalFrame.AccessibleJInternalFrame.getMinimumAccessibleValue ++javax.swing.JInternalFrame.getDesktopIcon ++javax.swing.JInternalFrame.isRootPaneCheckingEnabled ++javax.swing.JInternalFrame.setDefaultCloseOperation ++javax.swing.JInternalFrame.isIconifiable ++javax.swing.JInternalFrame.setTitle ++javax.swing.JInternalFrame.setClosable ++javax.swing.JInternalFrame.setResizable ++javax.swing.JInternalFrame.setIconifiable ++javax.swing.JInternalFrame.setFrameIcon ++javax.swing.JInternalFrame.setMaximizable ++javax.swing.JInternalFrame.getInputMap ++javax.swing.JInternalFrame.setClosed ++javax.swing.JInternalFrame.setNormalBounds ++javax.swing.JInternalFrame.setSelected2 ++javax.swing.JInternalFrame.dispose ++javax.swing.JInternalFrame.isResizable ++javax.swing.JInternalFrame.constructors ++javax.swing.JInternalFrame.paramString ++javax.swing.JInternalFrame.setDesktopIcon ++javax.swing.JInternalFrame.getNormalBounds ++javax.swing.SpinnerListModel.Constructors ++javax.swing.SpinnerListModel.Ordering ++javax.swing.SpinnerListModel.SetList ++javax.swing.SpinnerListModel.ArrayModel ++javax.swing.SpinnerListModel.ListModel ++javax.swing.JRadioButtonMenuItem.isFocusable ++javax.swing.JRadioButtonMenuItem.uidelegate ++javax.swing.JRadioButtonMenuItem.getActionCommand ++javax.swing.JRadioButtonMenuItem.model ++javax.swing.KeyStroke.getKeyStroke ++javax.swing.SpinnerNumberModel.getPreviousValue ++javax.swing.SpinnerNumberModel.setMinimum ++javax.swing.SpinnerNumberModel.setValue ++javax.swing.SpinnerNumberModel.setMaximum ++javax.swing.SpinnerNumberModel.getNextValue ++javax.swing.SpinnerNumberModel.setStepSize ++javax.swing.SpinnerNumberModel.constructors ++javax.swing.JLabel.AccessibleJLabel.getAccessibleName ++javax.swing.JLabel.setDisplayedMnemonicIndex ++javax.swing.JLabel.constructor ++javax.swing.JLabel.getAccessibleContext ++javax.swing.JLabel.Icon ++javax.swing.JLabel.getInputMap ++javax.swing.JLabel.setHorizontalAlignment ++javax.swing.JLabel.setFont ++javax.swing.JLabel.setDisplayedMnemonic ++javax.swing.JLabel.getActionMap ++javax.swing.JLabel.Mnemonic ++javax.swing.JLabel.paramString ++javax.swing.JLabel.setVerticalAlignment ++javax.swing.JEditorPane.ContentType ++javax.swing.JEditorPane.ViewType ++javax.swing.JEditorPane.setText ++javax.swing.JEditorPane.getScrollableTracks ++javax.swing.DefaultListSelectionModel.clone ++javax.swing.DefaultListSelectionModel.getListeners ++javax.swing.DefaultListSelectionModel.setValueIsAdjusting ++javax.swing.DefaultListSelectionModel.setAnchorSelectionIndex ++javax.swing.DefaultListSelectionModel.removeListSelectionListener ++javax.swing.DefaultListSelectionModel.constructor ++javax.swing.DefaultListSelectionModel.removeIndexInterval ++javax.swing.DefaultListSelectionModel.removeSelectionInterval ++javax.swing.DefaultListSelectionModel.getAnchorSelectionIndex ++javax.swing.DefaultListSelectionModel.getLeadSelectionIndex ++javax.swing.DefaultListSelectionModel.isLeadAnchorNotificationEnabled ++javax.swing.DefaultListSelectionModel.setSelectionInterval ++javax.swing.DefaultListSelectionModel.getSelectionMode ++javax.swing.DefaultListSelectionModel.isSelectedIndex ++javax.swing.DefaultListSelectionModel.moveLeadSelectionIndex ++javax.swing.DefaultListSelectionModel.leadSelectionIndex ++javax.swing.DefaultListSelectionModel.getMaxSelectionIndex 
++javax.swing.DefaultListSelectionModel.getMinSelectionIndex ++javax.swing.DefaultListSelectionModel.getListSelectionListeners ++javax.swing.DefaultListSelectionModel.getValueIsAdjusting ++javax.swing.DefaultListSelectionModel.isSelectionEmpty ++javax.swing.DefaultListSelectionModel.addSelectionInterval ++javax.swing.DefaultListSelectionModel.setLeadSelectionIndex ++javax.swing.DefaultListSelectionModel.toString ++javax.swing.DefaultListSelectionModel.setSelectionMode ++javax.swing.DefaultListSelectionModel.clearSelection ++javax.swing.DefaultListSelectionModel.insertIndexInterval ++javax.swing.DefaultListSelectionModel.addListSelectionListener ++javax.swing.SwingUtilities.calculateInnerArea ++javax.swing.SwingUtilities.isRectangleContainingRectangle ++javax.swing.SwingUtilities.computeIntersection ++javax.swing.SwingUtilities.replaceUIActionMap ++javax.swing.SwingUtilities.computeUnion ++javax.swing.JSpinner.ListEditor.constructor ++javax.swing.JSpinner.ListEditor.getModel ++javax.swing.JSpinner.NumberEditor.getModel ++javax.swing.JSpinner.NumberEditor.constructors ++javax.swing.JSpinner.NumberEditor.getFormat ++javax.swing.JSpinner.DefaultEditor.constructor ++javax.swing.JSpinner.DefaultEditor.stateChanged ++javax.swing.JSpinner.DefaultEditor.propertyChange ++javax.swing.JSpinner.DefaultEditor.preferredLayoutSize ++javax.swing.JSpinner.DefaultEditor.minimumLayoutSize ++javax.swing.JSpinner.DateEditor.getModel ++javax.swing.JSpinner.getPreviousValue ++javax.swing.JSpinner.addChangeListener ++javax.swing.JSpinner.createEditor ++javax.swing.JSpinner.getChangeListeners ++javax.swing.JSpinner.setEditor ++javax.swing.JSpinner.getUIClassID ++javax.swing.JSpinner.getEditor ++javax.swing.JSpinner.setModel ++javax.swing.JSpinner.getNextValue ++javax.swing.JSpinner.getModel ++javax.swing.JSpinner.constructors ++javax.swing.JSpinner.removeChangeListener ++javax.swing.ButtonGroup.constructor ++javax.swing.ButtonGroup.add ++javax.swing.ButtonGroup.isSelected ++javax.swing.ButtonGroup.getSelection ++javax.swing.ButtonGroup.getElements ++javax.swing.ButtonGroup.getButtonCount ++javax.swing.ButtonGroup.setSelected ++javax.swing.ButtonGroup.remove ++javax.swing.JViewport.setView ++gnu.javax.crypto.jce.TestOfDHKeyAgreement2 ++gnu.javax.crypto.jce.TestOfPR27849 ++gnu.javax.crypto.jce.TestOfPR27228 ++java.rmi.server.Uniqueness ++java.lang.Integer.compareTo ++java.lang.Integer.decode ++java.lang.Integer.new_Integer ++java.lang.Integer.IntegerTest ++java.lang.Integer.getInteger ++java.lang.Integer.parseInt ++java.lang.ClassLoader.redefine ++java.lang.ClassLoader.security ++java.lang.ClassLoader.loadClass ++java.lang.ClassLoader.initialize ++java.lang.ClassLoader.Resources ++java.lang.ClassLoader.BootDefinedPackages ++java.lang.Number.NumberTest ++java.lang.ProcessBuilder.simple ++java.lang.System.getProperty ++java.lang.System.arraycopy ++java.lang.System.identityHashCode ++java.lang.ThreadGroup.enumerate ++java.lang.ThreadGroup.insecurity ++java.lang.ThreadGroup.security ++java.lang.String.substring ++java.lang.String.compareTo ++java.lang.String.getBytes13 ++java.lang.String.hash ++java.lang.String.split ++java.lang.String.getBytes14 ++java.lang.String.startsWith ++java.lang.String.equals ++java.lang.String.indexOf ++java.lang.String.to ++java.lang.String.StringTest ++java.lang.String.surrogate ++java.lang.String.charAt ++java.lang.String.new_String ++java.lang.String.getBytes ++java.lang.String.CASE_INSENSITIVE_ORDER ++java.lang.Class.ClassTest ++java.lang.Class.serialization ++java.lang.Class.reflect 
++java.lang.Class.security ++java.lang.Class.init ++java.lang.Class.newInstance ++java.lang.Class.reflect2 ++java.lang.Cloneable.CloneableTest ++java.lang.ref.WeakReference.weakref ++java.lang.ref.PhantomReference.phantom ++java.lang.Double.compareTo ++java.lang.Double.DoubleSetterTest ++java.lang.Double.DoubleTest ++java.lang.Double.toHexString ++java.lang.Double.parseDouble ++java.lang.Double.compare ++java.lang.Double.valueOf ++java.lang.StrictMath.cbrt ++java.lang.StrictMath.expm1 ++java.lang.StrictMath.cosh ++java.lang.StrictMath.sinh ++java.lang.StrictMath.tanh ++java.lang.Short.hash ++java.lang.Short.ShortTest ++java.lang.InheritableThreadLocal.simple ++java.lang.Math.max ++java.lang.Math.ulp ++java.lang.Math.sin ++java.lang.Math.min ++java.lang.Math.cos ++java.lang.Math.MathTest ++java.lang.Math.rint ++java.lang.Boolean.BooleanTest ++java.lang.Boolean.equals_Boolean ++java.lang.Boolean.new_Boolean ++java.lang.Boolean.hashcode_Boolean ++java.lang.Boolean.get ++java.lang.Boolean.value ++java.lang.Float.compareTo ++java.lang.Float.toHexString ++java.lang.Float.compare ++java.lang.Float.parseFloat ++java.lang.Float.valueOf ++java.lang.Float.FloatTest ++java.lang.Float.new_Float ++java.lang.reflect.Array.set ++java.lang.reflect.Array.newInstance ++java.lang.reflect.AccessibleObject.accessible ++java.lang.reflect.Field.promotion ++java.lang.reflect.Field.access ++java.lang.reflect.Field.toString ++java.lang.reflect.Modifier.toString12 ++java.lang.reflect.Modifier.toString ++java.lang.reflect.Constructor.newInstance ++java.lang.reflect.Constructor.toString ++java.lang.reflect.Proxy.DeclaringClass ++java.lang.reflect.Proxy.check13 ++java.lang.reflect.Proxy.ExceptionRaising ++java.lang.reflect.Proxy.ToString ++java.lang.reflect.InvocationTargetException.Chain ++java.lang.reflect.Method.equals ++java.lang.reflect.Method.toString ++java.lang.reflect.Method.invoke ++java.lang.reflect.ReflectAccess ++java.lang.ThreadLocal.simple ++java.lang.Object.clone ++java.lang.Object.wait ++java.lang.Object.ObjectTest ++java.lang.Object.oom ++java.lang.Byte.ByteTest ++java.lang.Byte.new_Byte ++java.lang.StringBuffer.StringBufferTest ++java.lang.StringBuffer.plus ++java.lang.Long.Tests15 ++java.lang.Long.getLong ++java.lang.Long.LongTest ++java.lang.Long.new_Long ++java.lang.Thread.join ++java.lang.Thread.isAlive ++java.lang.Thread.name ++java.lang.Thread.interrupt ++java.lang.Thread.contextClassLoader ++java.lang.Thread.getThreadGroup ++java.lang.Thread.priority ++java.lang.Thread.stop ++java.lang.Thread.daemon ++java.lang.Package.getPackage ++java.lang.Character.digit ++java.lang.Character.Blocks ++java.lang.Character.unicode ++java.lang.Character.getNumericValue ++java.lang.Character.getType ++java.lang.Character.hash ++java.lang.Character.classify ++java.lang.Character.classify12 ++java.lang.Character.to ++java.lang.Character.consts ++java.lang.Character.forDigit ++java.lang.Character.getType12 ++java.lang.Character.equals_Character ++java.lang.Character.CharacterTest ++java.lang.Character.Blocks15 ++java.lang.SecurityManager.thread ++java.text.StringCharacterIterator.constructor ++java.text.StringCharacterIterator.iter ++java.text.CollationElementIterator.offset ++java.text.CollationElementIterator.jdk11 ++java.text.AttributedString.constructors ++java.text.AttributedString.addAttribute ++java.text.AttributedString.getIterator ++java.text.AttributedString.addAttributes ++java.text.AttributedString.Test ++java.text.DecimalFormat.setCurrency ++java.text.DecimalFormat.clone 
++java.text.DecimalFormat.setDecimalFormatSymbols ++java.text.DecimalFormat.getNegativePrefix ++java.text.DecimalFormat.applyPattern ++java.text.DecimalFormat.formatToCharacterIterator ++java.text.DecimalFormat.getNegativeSuffix ++java.text.DecimalFormat.setNegativePrefix ++java.text.DecimalFormat.applyLocalizedPattern ++java.text.DecimalFormat.getDecimalFormatSymbols ++java.text.DecimalFormat.toLocalizedPattern ++java.text.DecimalFormat.isDecimalSeparatorAlwaysShown ++java.text.DecimalFormat.setNegativeSuffix ++java.text.DecimalFormat.PR27311 ++java.text.DecimalFormat.getPositivePrefix ++java.text.DecimalFormat.getCurrency ++java.text.DecimalFormat.equals ++java.text.DecimalFormat.digits ++java.text.DecimalFormat.setDecimalSeparatorAlwaysShown ++java.text.DecimalFormat.setPositiveSuffix ++java.text.DecimalFormat.parse ++java.text.DecimalFormat.setPositivePrefix ++java.text.DecimalFormat.toPattern14 ++java.text.DecimalFormat.setMultiplier ++java.text.DecimalFormat.format ++java.text.DecimalFormat.getPositiveSuffix ++java.text.DecimalFormat.setGroupingSize ++java.text.DecimalFormat.PR23996 ++java.text.DecimalFormat.toPattern ++java.text.DecimalFormat.constructors ++java.text.DecimalFormat.MaximumAndMinimumDigits ++java.text.DecimalFormat.formatExp ++java.text.DecimalFormat.getGroupingSize ++java.text.DecimalFormat.hashCode ++java.text.DecimalFormat.position ++java.text.DecimalFormat.getMultiplier ++java.text.NumberFormat.UK ++java.text.CharacterIterator.implement ++java.text.ParsePosition.Test ++java.text.BreakIterator.chariter ++java.text.BreakIterator.patho ++java.text.BreakIterator.lineiter ++java.text.BreakIterator.sentiter ++java.text.BreakIterator.worditer ++java.text.Collator.Constants ++java.text.Collator.GetSet ++java.text.ParseException.Test ++java.text.Annotation.Test ++java.text.RuleBasedCollator.CollatorTests ++java.text.RuleBasedCollator.VeryBasic ++java.text.MessageFormat.format14 ++java.text.MessageFormat.attribute ++java.text.MessageFormat.parse ++java.text.MessageFormat.format ++java.text.ChoiceFormat.next ++java.text.ChoiceFormat.parse ++java.text.ChoiceFormat.format ++java.text.DateFormatSymbols.setMonths ++java.text.DateFormatSymbols.setShortMonths ++java.text.DateFormatSymbols.setEras ++java.text.DateFormatSymbols.setAmPmStrings ++java.text.DateFormatSymbols.setWeekdays ++java.text.DateFormatSymbols.setZoneStrings ++java.text.DateFormatSymbols.setShortWeekdays ++java.text.DateFormatSymbols.Test ++java.text.AttributedCharacterIterator.Attribute.toString ++java.text.AttributedCharacterIterator.getRunLimit ++java.text.AttributedCharacterIterator.implement ++java.text.AttributedCharacterIterator.getRunStart ++java.text.AttributedCharacterIterator.getAttribute ++java.text.DateFormat.equals ++java.text.DateFormat.hashCode ++java.text.DateFormat.Test ++java.text.ACIAttribute.Test ++java.text.DecimalFormatSymbols.serial ++java.text.DecimalFormatSymbols.GetSet12 ++java.text.DecimalFormatSymbols.getCurrency ++java.text.DecimalFormatSymbols.DumpDefault11 ++java.text.DecimalFormatSymbols.GetSet11 ++java.text.DecimalFormatSymbols.DumpDefault12 ++java.text.FieldPosition.Test ++java.text.SimpleDateFormat.applyPattern ++java.text.SimpleDateFormat.Localization ++java.text.SimpleDateFormat.applyLocalizedPattern ++java.text.SimpleDateFormat.toLocalizedPattern ++java.text.SimpleDateFormat.attribute ++java.text.SimpleDateFormat.Cloning ++java.text.SimpleDateFormat.getDateFormatSymbols ++java.text.SimpleDateFormat.parse ++java.text.SimpleDateFormat.toPattern 
++java.text.SimpleDateFormat.constructors ++java.text.SimpleDateFormat.setDateFormatSymbols ++java.text.SimpleDateFormat.getAndSet2DigitYearStart ++java.text.SimpleDateFormat.Test ++java.text.Bidi.Basic ++java.text.Bidi.reorderVisually ++java.math.BigInteger.compareTo ++java.math.BigInteger.modInverse ++java.math.BigInteger.shift ++java.math.BigInteger.serialization ++java.math.BigInteger.add ++java.math.BigInteger.TestOfPR27372 ++java.math.BigInteger.signum ++java.math.BigInteger.modPow ++java.math.BigInteger.equals ++java.math.BigInteger.divide ++java.math.BigInteger.setBit ++java.math.BigInteger.multiply ++java.math.BigInteger.TestOfToByteArray ++java.math.BigInteger.toString ++java.math.BigInteger.ctor ++java.math.BigInteger.abs ++java.math.BigInteger.valueOf ++java.math.BigDecimal.compareTo ++java.math.BigDecimal.divide ++java.math.BigDecimal.DiagBigDecimal ++java.math.BigDecimal.construct ++java.math.BigDecimal.setScale ++java.security.MessageDigest.Instance ++java.security.AccessController.doPrivileged ++java.security.AccessController.contexts ++java.security.Security.getAlgorithms ++java.security.Security.provider ++java.security.Provider.NameVersionInfo ++java.security.SecureRandom.SHA1PRNG ++java.security.SecureRandom.TestOfPR23899 ++java.security.SecureRandom.Instance ++java.security.KeyPairGenerator.getInstance14 ++java.security.AlgorithmParameters.getInstance14 ++java.security.AlgorithmParameterGenerator.getInstance14 ++java.security.DigestInputStream.readMD5 ++java.security.KeyFactory.getInstance14 ++java.security.Signature.getInstance14 ++java.security.Signature.Instance ++java.security.BasicPermission.newPermission ++java.sql.Clob.ClobTest ++java.sql.Types.TestJdbc10 ++java.sql.Types.TestJdbc20 ++java.sql.Array.ArrayTest ++java.sql.Blob.BlobTest ++java.sql.Connection.TestJdbc ++java.sql.Time.TimeTest ++java.sql.DriverManager.DriverManagerTest ++java.sql.Date.DateTest ++java.sql.DatabaseMetaData.TestJdbc ++java.sql.Timestamp.TimestampTest ++java.io.RandomAccessFile.setLength ++java.io.RandomAccessFile.security ++java.io.RandomAccessFile.jdk11 ++java.io.RandomAccessFile.randomaccessfile ++java.io.BufferedWriter.Test ++java.io.PipedReaderWriter.Test ++java.io.PushbackInputStream.ProtectedVars ++java.io.PushbackInputStream.BufferOverflow ++java.io.PushbackInputStream.Unread ++java.io.FileOutputStream.append ++java.io.FileOutputStream.fileoutputstream ++java.io.FileOutputStream.security ++java.io.FileOutputStream.write ++java.io.FileOutputStream.jdk12 ++java.io.StringWriter.Test ++java.io.FilterOutputStream.write ++java.io.ObjectInputStream.security ++java.io.ObjectInputStream.readResolve ++java.io.ObjectInputStream.ClassLoaderTest ++java.io.ObjectOutputStream.security ++java.io.ObjectOutputStream.StreamDataTest ++java.io.ObjectOutputStream.useProtocolVersion ++java.io.InputStream.Test ++java.io.Utf8Encoding.ReadReference ++java.io.Utf8Encoding.WriteRead ++java.io.FilePermission.traversal2 ++java.io.FilePermission.simple ++java.io.FilePermission.traversal ++java.io.FileWriter.jdk11 ++java.io.OutputStreamWriter.jdk11 ++java.io.Serializable.BreakMe ++java.io.Serializable.BreakMeTestSer ++java.io.Serializable.ParentReadResolve ++java.io.Serializable.readResolve ++java.io.Serializable.ParentWriteReplace ++java.io.ObjectInputOutput.LoopSerializationTest ++java.io.ObjectInputOutput.ExtTest ++java.io.ObjectInputOutput.Deserializable ++java.io.ObjectInputOutput.ProxySerializationTest ++java.io.ObjectInputOutput.Compat1 ++java.io.ObjectInputOutput.SerTest 
++java.io.ObjectInputOutput.HierarchyTest ++java.io.ObjectInputOutput.OutputTest ++java.io.ObjectInputOutput.InputTest ++java.io.PrintWriter.jdk11 ++java.io.PrintWriter.checkError ++java.io.SequenceInputStream.Test ++java.io.StringBufferInputStream.ProtectedVars ++java.io.StringBufferInputStream.SimpleRead ++java.io.StringBufferInputStream.MarkReset ++java.io.LineNumberReader.Test2 ++java.io.LineNumberReader.mark ++java.io.LineNumberReader.Test ++java.io.InputStreamReader.hang ++java.io.InputStreamReader.utf8 ++java.io.InputStreamReader.jdk11 ++java.io.InputStreamReader.except ++java.io.InputStreamReader.getEncoding ++java.io.DataInputStream.readLine ++java.io.DataInputStream.ReadReference ++java.io.DataInputStream.ReadReference2 ++java.io.FilterWriter.write ++java.io.CharArrayWriter.ProtectedVars ++java.io.CharArrayWriter.BasicTests ++java.io.FileDescriptor.jdk11 ++java.io.Reader.Test ++java.io.PushbackReader.BufferOverflow ++java.io.PushbackReader.Unread ++java.io.ByteArrayOutputStream.subclass ++java.io.ByteArrayOutputStream.write ++java.io.PrintStream.subclass ++java.io.PrintStream.encodings ++java.io.ObjectStreamClass.ProxyTest ++java.io.ObjectStreamClass.Test ++java.io.FilterReader.SimpleRead ++java.io.FilterReader.MarkReset ++java.io.BufferedOutputStream.interrupt ++java.io.BufferedOutputStream.Test ++java.io.BufferedInputStream.Skip ++java.io.BufferedInputStream.BigMark ++java.io.BufferedInputStream.ProtectedVars ++java.io.BufferedInputStream.ZeroRead ++java.io.BufferedInputStream.SimpleRead ++java.io.BufferedInputStream.MarkReset ++java.io.BufferedReader.mark ++java.io.BufferedReader.SimpleRead ++java.io.BufferedReader.MarkReset ++java.io.BufferedReader.boundary ++java.io.DataOutputStream.WriteRead2 ++java.io.DataOutputStream.writeUTF ++java.io.DataOutputStream.WriteRead ++java.io.PipedStream.close ++java.io.PipedStream.receive ++java.io.PipedStream.Test ++java.io.FileInputStream.fileinputstream ++java.io.FileInputStream.read ++java.io.FileInputStream.security ++java.io.Writer.Test ++java.io.FilterInputStream.SimpleRead ++java.io.FilterInputStream.MarkReset ++java.io.FileReader.jdk11 ++java.io.StringReader.Test ++java.io.ByteArrayInputStream.ProtectedVars ++java.io.ByteArrayInputStream.SimpleRead ++java.io.ByteArrayInputStream.MarkReset ++java.io.File.newFileURI ++java.io.File.URI ++java.io.File.createFile ++java.io.File.security ++java.io.File.ReadMethods ++java.io.File.ExecuteMethods ++java.io.File.list ++java.io.File.jdk11 ++java.io.File.newFile ++java.io.File.WriteMethods ++java.io.File.canWrite ++java.io.File.listFiles ++java.io.File.emptyFile ++java.io.File.UnicodeURI ++java.io.OutputStream.Test ++java.io.StreamTokenizer.WordWhiteChars ++java.io.StreamTokenizer.slashstar ++java.io.StreamTokenizer.commentchar ++java.io.StreamTokenizer.slashslash ++java.io.StreamTokenizer.newline ++java.io.StreamTokenizer.misc ++java.io.StreamTokenizer.Test ++java.io.LineNumberInputStream.Test ++java.io.CharArrayReader.ProtectedVars ++java.io.CharArrayReader.OutOfBounds ++java.io.CharArrayReader.SimpleRead ++java.io.CharArrayReader.MarkReset ++java.util.Stack.AcuniaStackTest ++java.util.Arrays.asList ++java.util.Arrays.fill ++java.util.Arrays.equals ++java.util.Arrays.sort ++java.util.Arrays.binarySearch ++java.util.Properties.getProperty ++java.util.Properties.AcuniaPropertiesTest ++java.util.Properties.load ++java.util.TreeSet.basic ++java.util.Observable.observable ++java.util.StringTokenizer.hasMoreElements ++java.util.StringTokenizer.hasMoreTokens ++java.util.StringTokenizer.countTokens 
++java.util.StringTokenizer.nextToken ++java.util.StringTokenizer.constructors ++java.util.StringTokenizer.nextElement ++java.util.IdentityHashMap.simple ++java.util.prefs.PreferenceTest ++java.util.Hashtable.HashContains ++java.util.Hashtable.AcuniaHashtableTest ++java.util.Hashtable.EnumerateAndModify ++java.util.Hashtable.basic ++java.util.Hashtable.ContainsHash ++java.util.Hashtable.NullValue ++java.util.TreeMap.serialization ++java.util.TimeZone.setDefault ++java.util.TimeZone.zdump ++java.util.TimeZone.setID ++java.util.UUID.TestAll ++java.util.Currency.Constructors ++java.util.Currency.Germany ++java.util.Currency.ReferenceEquality ++java.util.Currency.France ++java.util.Currency.getInstance ++java.util.Currency.Japan ++java.util.Currency.UK ++java.util.Currency.Korea ++java.util.Currency.PRC ++java.util.Currency.Canada ++java.util.Currency.CanadaFrench ++java.util.Currency.China ++java.util.Currency.Taiwan ++java.util.Currency.Italy ++java.util.AbstractCollection.AcuniaAbstractCollectionTest ++java.util.AbstractCollection.AcuniaAddCollectionTest ++java.util.AbstractCollection.toString ++java.util.SimpleTimeZone.clone ++java.util.SimpleTimeZone.hasSameRules ++java.util.SimpleTimeZone.getRawOffset ++java.util.SimpleTimeZone.setRawOffset ++java.util.SimpleTimeZone.check14 ++java.util.SimpleTimeZone.equals ++java.util.SimpleTimeZone.inDaylightTime ++java.util.SimpleTimeZone.setStartYear ++java.util.SimpleTimeZone.constructors ++java.util.SimpleTimeZone.getOffset ++java.util.SimpleTimeZone.setDSTSavings ++java.util.SimpleTimeZone.hashCode ++java.util.SimpleTimeZone.constants ++java.util.SimpleTimeZone.getDSTSavings ++java.util.AbstractList.AcuniaAbstractListTest ++java.util.Collections.nCopies ++java.util.Collections.fill ++java.util.Collections.max ++java.util.Collections.copy ++java.util.Collections.rotate ++java.util.Collections.unmodifiableList ++java.util.Collections.min ++java.util.Collections.unmodifiableMap ++java.util.Collections.reverse ++java.util.Collections.reverseOrder ++java.util.Collections.sort ++java.util.Collections.binarySearch ++java.util.logging.SocketHandler.SocketHandler ++java.util.logging.XMLFormatter.getTail ++java.util.logging.Handler.isLoggable ++java.util.logging.Handler.setEncoding ++java.util.logging.Handler.setFilter ++java.util.logging.Handler.getErrorManager ++java.util.logging.Handler.reportError ++java.util.logging.Handler.setLevel ++java.util.logging.Handler.setErrorManager ++java.util.logging.LogRecord.getThreadID ++java.util.logging.LogRecord.setLoggerName ++java.util.logging.LogRecord.getMillis ++java.util.logging.LogRecord.setThrown ++java.util.logging.LogRecord.setThreadID ++java.util.logging.LogRecord.setSequenceNumber ++java.util.logging.LogRecord.setSourceMethodName ++java.util.logging.LogRecord.setResourceBundle ++java.util.logging.LogRecord.setMillis ++java.util.logging.LogRecord.setMessage ++java.util.logging.LogRecord.setParameters ++java.util.logging.LogRecord.setSourceClassName ++java.util.logging.LogRecord.setLevel ++java.util.logging.Level.intValue ++java.util.logging.Level.equals ++java.util.logging.Level.getName ++java.util.logging.Level.parse ++java.util.logging.Level.toString ++java.util.logging.Level.hashCode ++java.util.logging.LogManager.readConfiguration ++java.util.logging.LoggingMXBean.Test ++java.util.logging.Logger.hierarchyChecks ++java.util.logging.Logger.getName ++java.util.logging.Logger.getAnonymousLogger ++java.util.logging.Logger.securityChecks ++java.util.logging.Logger.getParent ++java.util.logging.Logger.global 
++java.util.Iterator.ConcurrentModification ++java.util.BitSet.AcuniaBitSetTest ++java.util.BitSet.flip ++java.util.BitSet.jdk10 ++java.util.BitSet.get ++java.util.BitSet.clear ++java.util.Timer.taskException ++java.util.GregorianCalendar.first ++java.util.GregorianCalendar.setFirstDayOfWeek ++java.util.GregorianCalendar.dayOfWeekInMonth ++java.util.GregorianCalendar.setWeekOfMonth ++java.util.GregorianCalendar.internal ++java.util.GregorianCalendar.equals ++java.util.GregorianCalendar.getMinimum ++java.util.GregorianCalendar.getMinimalDaysInFirstWeek ++java.util.GregorianCalendar.conversion ++java.util.GregorianCalendar.weekOfYear ++java.util.Random.basic ++java.util.AbstractSet.AcuniaAbstractSetTest ++java.util.Calendar.TimeZone ++java.util.Calendar.ampm ++java.util.Calendar.add ++java.util.Calendar.minmax ++java.util.Calendar.setTime ++java.util.Calendar.setTimeZone ++java.util.Calendar.getInstance ++java.util.Calendar.set ++java.util.Calendar.simple ++java.util.Calendar.roll ++java.util.Calendar.dstOffset ++java.util.regex.Matcher.hitEnd ++java.util.regex.Pattern.UnicodeSimpleCategory ++java.util.regex.Pattern.pcrematches ++java.util.regex.Pattern.matches ++java.util.regex.PatternSplit ++java.util.regex.CharacterClasses ++java.util.AbstractSequentialList.AcuniaAbstractSequentialListTest ++java.util.LinkedList.subList ++java.util.LinkedList.AcuniaLinkedListTest ++java.util.LinkedList.SubListTest ++java.util.HashMap.AcuniaHashMapTest ++java.util.jar.JarInputStream.getNextEntry ++java.util.jar.JarFile.TestOfManifest ++java.util.jar.JarFile.basic ++java.util.Vector.subList ++java.util.Vector.copyInto ++java.util.Vector.removeAll ++java.util.Vector.retainAll ++java.util.Vector.VectorSerialization ++java.util.zip.ZipEntry.time ++java.util.zip.ZipEntry.Size ++java.util.zip.ZipEntry.newZipEntry ++java.util.zip.ZipEntry.setComment ++java.util.zip.ZipInputStream.close ++java.util.zip.ZipInputStream.basic ++java.util.zip.Deflater.PR27435 ++java.util.zip.Adler32.checksum ++java.util.zip.GZIPInputStream.PR24461 ++java.util.zip.GZIPInputStream.basic ++java.util.zip.InflaterInputStream.basic ++java.util.zip.ZipFile.DirEntryTest ++java.util.zip.ZipFile.NoEntryTest ++java.util.zip.ZipFile.newZipFile ++java.util.Date.compareTo ++java.util.Date.clone ++java.util.Date.before ++java.util.Date.serialization ++java.util.Date.range ++java.util.Date.equals ++java.util.Date.parse ++java.util.Date.after ++java.util.Date.getTimezoneOffset ++java.util.LinkedHashMap.LinkedHashMapTest ++java.util.LinkedHashMap.Regress ++java.util.ArrayList.subList ++java.util.ArrayList.serial ++java.util.ResourceBundle.getBundle ++java.util.AbstractMap.AcuniaAbstractMapTest ++java.beans.XMLDecoder.jdk14 ++java.beans.Expression.check ++java.beans.Introspector.getBeanInfo2 ++java.beans.Introspector.getBeanInfo4 ++java.beans.Introspector.getBeanInfo ++java.beans.Introspector.jdk11 ++java.beans.Introspector.jdk12 ++java.beans.Introspector.getBeanInfo2_2 ++java.beans.DesignMode.constants ++java.beans.PropertyEditorSupport.setValue ++java.beans.PropertyEditorSupport.getSource ++java.beans.SimpleBeanInfo.getIcon ++java.beans.SimpleBeanInfo.getDefaultPropertyIndex ++java.beans.SimpleBeanInfo.getEventSetDescriptors ++java.beans.SimpleBeanInfo.getDefaultEventIndex ++java.beans.SimpleBeanInfo.getBeanDescriptor ++java.beans.SimpleBeanInfo.getAdditionalBeanInfo ++java.beans.SimpleBeanInfo.loadImage ++java.beans.EventSetDescriptor.constructorTest1 ++java.beans.FeatureDescriptor.check ++java.beans.beancontext.BeanContextSupport.toArray 
++java.beans.beancontext.BeanContextSupport.getChildPropertyChangeListener ++java.beans.beancontext.BeanContextSupport.serialize ++java.beans.beancontext.BeanContextSupport.getChildBeanContextChild ++java.beans.beancontext.BeanContextSupport.getChildBeanContextMembershipListener ++java.beans.beancontext.BeanContextSupport.setDesignTime ++java.beans.beancontext.BeanContextSupport.getChildVisibility ++java.beans.beancontext.BeanContextSupport.constructors ++java.beans.beancontext.BeanContextSupport.getChildVetoableChangeListener ++java.beans.beancontext.BeanContextSupport.getChildSerializable ++java.beans.beancontext.BeanContextSupport.getBeanContextPeer ++java.beans.beancontext.BeanContextServicesSupport.getChildBeanContextServicesListener ++java.beans.beancontext.InstantiateChild ++java.beans.beancontext.Remove ++java.beans.beancontext.Array ++java.beans.beancontext.Add ++java.beans.PropertyChangeSupport.firePropertyChange ++java.beans.VetoableChangeSupport.addVetoableChangeListener ++java.beans.Statement.check ++java.beans.Beans.instantiate_1 ++java.beans.EventHandler.check ++java.beans.EventHandler.check14b ++java.beans.PropertyDescriptor.constructorTest2 ++java.beans.PropertyDescriptor.constructorTest1 ++java.beans.MethodDescriptor.constructorTest1 ++java.awt.AWTEvent.constants ++java.awt.CardLayout.show ++java.awt.CardLayout.first ++java.awt.CardLayout.testMaximumLayoutSize ++java.awt.CardLayout.testMinimumLayoutSize ++java.awt.Graphics.clearRect ++java.awt.Polygon.contains ++java.awt.Polygon.getPathIterator ++java.awt.BasicStroke.equals ++java.awt.BasicStroke.constructors ++java.awt.BasicStroke.hashCode ++java.awt.BasicStroke.constants ++java.awt.Graphics2D.setClip ++java.awt.Graphics2D.getClip ++java.awt.Graphics2D.getClipBounds ++java.awt.Graphics2D.setTransform ++java.awt.Graphics2D.clip ++java.awt.Graphics2D.transform ++java.awt.Point.move ++java.awt.Point.clone ++java.awt.Point.equals ++java.awt.Point.constructors ++java.awt.Point.setLocation ++java.awt.Point.translate ++java.awt.Point.getLocation ++java.awt.event.ComponentEvent.paramString ++java.awt.image.ShortLookupTable.getTable ++java.awt.image.ShortLookupTable.lookupPixel ++java.awt.image.ShortLookupTable.constructors ++java.awt.image.PixelGrabber.SimpleGrabber ++java.awt.image.PixelGrabber.testNullProducer ++java.awt.image.ConvolveOp.getEdgeCondition ++java.awt.image.ConvolveOp.getKernel ++java.awt.image.ConvolveOp.filterRaster ++java.awt.image.ConvolveOp.getRenderingHints ++java.awt.image.ConvolveOp.getPoint2D ++java.awt.image.ConvolveOp.createCompatibleDestRaster ++java.awt.image.ConvolveOp.constructors ++java.awt.image.ConvolveOp.getBounds2D ++java.awt.image.ConvolveOp.constants ++java.awt.image.IndexColorModel.getGreens ++java.awt.image.IndexColorModel.getComponentSize ++java.awt.image.IndexColorModel.getTransparentPixel ++java.awt.image.IndexColorModel.getColorSpace ++java.awt.image.IndexColorModel.getAlphas ++java.awt.image.IndexColorModel.getPixelSize ++java.awt.image.IndexColorModel.isValid ++java.awt.image.IndexColorModel.constructors ++java.awt.image.IndexColorModel.getBlues ++java.awt.image.IndexColorModel.getReds ++java.awt.image.IndexColorModel.getTransparency ++java.awt.image.DataBuffer.getDataTypeSize ++java.awt.image.DataBuffer.getOffset ++java.awt.image.DataBuffer.constants ++java.awt.image.DataBuffer.getOffsets ++java.awt.image.DataBufferUShort.getData ++java.awt.image.DataBufferUShort.getBankData ++java.awt.image.DataBufferUShort.setElem ++java.awt.image.DataBufferUShort.getElem 
++java.awt.image.DataBufferUShort.getDataType ++java.awt.image.DataBufferUShort.constructors ++java.awt.image.ComponentSampleModel.setPixels ++java.awt.image.ComponentSampleModel.getPixelStride ++java.awt.image.ComponentSampleModel.getDataElements ++java.awt.image.ComponentSampleModel.getSample ++java.awt.image.ComponentSampleModel.getBandOffsets ++java.awt.image.ComponentSampleModel.equals ++java.awt.image.ComponentSampleModel.getPixel ++java.awt.image.ComponentSampleModel.getNumDataElements ++java.awt.image.ComponentSampleModel.getSamples ++java.awt.image.ComponentSampleModel.getSampleSize ++java.awt.image.ComponentSampleModel.getSampleFloat ++java.awt.image.ComponentSampleModel.setDataElements ++java.awt.image.ComponentSampleModel.constructors ++java.awt.image.ComponentSampleModel.setPixel ++java.awt.image.ComponentSampleModel.createDataBuffer ++java.awt.image.ComponentSampleModel.getOffset ++java.awt.image.ComponentSampleModel.getSampleDouble ++java.awt.image.ComponentSampleModel.createCompatibleSampleModel ++java.awt.image.ComponentSampleModel.setSamples ++java.awt.image.ComponentSampleModel.hashCode ++java.awt.image.ComponentSampleModel.getPixels ++java.awt.image.ComponentSampleModel.getScanlineStride ++java.awt.image.ComponentSampleModel.createSubsetSampleModel ++java.awt.image.ColorModel.getComponentSize ++java.awt.image.ColorModel.getRGBdefault ++java.awt.image.ColorModel.constructors ++java.awt.image.LookupTable.constructor ++java.awt.image.LookupTable.getNumComponents ++java.awt.image.RescaleOp.getScaleFactors ++java.awt.image.RescaleOp.getRenderingHints ++java.awt.image.RescaleOp.getPoint2D ++java.awt.image.RescaleOp.constructors ++java.awt.image.RescaleOp.getNumFactors ++java.awt.image.RescaleOp.getOffsets ++java.awt.image.DirectColorModel.coerceData ++java.awt.image.WritableRaster.createChild ++java.awt.image.WritableRaster.createWritableChild ++java.awt.image.ComponentColorModel.coerceData ++java.awt.image.SampleModel.setPixels ++java.awt.image.SampleModel.getSample ++java.awt.image.SampleModel.getPixel ++java.awt.image.SampleModel.getSamples ++java.awt.image.SampleModel.getSampleSize ++java.awt.image.SampleModel.getSampleFloat ++java.awt.image.SampleModel.setPixel ++java.awt.image.SampleModel.setSample ++java.awt.image.SampleModel.createDataBuffer ++java.awt.image.SampleModel.getSampleDouble ++java.awt.image.SampleModel.setSamples ++java.awt.image.SampleModel.getPixels ++java.awt.image.BandCombineOp.getPoint2D ++java.awt.image.BandCombineOp.createCompatibleDestRaster ++java.awt.image.BandCombineOp.getBounds2D ++java.awt.image.BandCombineOp.filter ++java.awt.image.LookupOp.constructor ++java.awt.image.LookupOp.filterRaster ++java.awt.image.LookupOp.getRenderingHints ++java.awt.image.LookupOp.getTable ++java.awt.image.LookupOp.getPoint2D ++java.awt.image.Kernel.getYOrigin ++java.awt.image.Kernel.constructor ++java.awt.image.Kernel.getXOrigin ++java.awt.image.Kernel.getHeight ++java.awt.image.Kernel.check ++java.awt.image.Kernel.getKernelData ++java.awt.image.Kernel.getWidth ++java.awt.image.BufferedImage.getSubimage ++java.awt.image.BufferedImage.getSetRgb1Pixel ++java.awt.image.DataBufferByte.getData ++java.awt.image.DataBufferByte.getBankData ++java.awt.image.DataBufferByte.setElem ++java.awt.image.DataBufferByte.getElem ++java.awt.image.DataBufferByte.getDataType ++java.awt.image.DataBufferByte.constructors ++java.awt.image.DataBufferShort.getData ++java.awt.image.DataBufferShort.getBankData ++java.awt.image.DataBufferShort.setElem
++java.awt.image.DataBufferShort.getElem ++java.awt.image.DataBufferShort.getDataType ++java.awt.image.DataBufferShort.constructors ++java.awt.image.Raster.createChild ++java.awt.image.DataBufferInt.getData ++java.awt.image.DataBufferInt.getBankData ++java.awt.image.DataBufferInt.setElem ++java.awt.image.DataBufferInt.getElem ++java.awt.image.DataBufferInt.getDataType ++java.awt.image.DataBufferInt.constructors ++java.awt.image.SinglePixelPackedSampleModel.getBitOffsets ++java.awt.image.SinglePixelPackedSampleModel.getDataElements ++java.awt.image.SinglePixelPackedSampleModel.getSample ++java.awt.image.SinglePixelPackedSampleModel.equals ++java.awt.image.SinglePixelPackedSampleModel.getPixel ++java.awt.image.SinglePixelPackedSampleModel.getNumDataElements ++java.awt.image.SinglePixelPackedSampleModel.getSamples ++java.awt.image.SinglePixelPackedSampleModel.getSampleSize ++java.awt.image.SinglePixelPackedSampleModel.setDataElements ++java.awt.image.SinglePixelPackedSampleModel.constructors ++java.awt.image.SinglePixelPackedSampleModel.getBitMasks ++java.awt.image.SinglePixelPackedSampleModel.setPixel ++java.awt.image.SinglePixelPackedSampleModel.setSample ++java.awt.image.SinglePixelPackedSampleModel.createDataBuffer ++java.awt.image.SinglePixelPackedSampleModel.getOffset ++java.awt.image.SinglePixelPackedSampleModel.createCompatibleSampleModel ++java.awt.image.SinglePixelPackedSampleModel.hashCode ++java.awt.image.SinglePixelPackedSampleModel.getPixels ++java.awt.image.SinglePixelPackedSampleModel.getScanlineStride ++java.awt.image.SinglePixelPackedSampleModel.createSubsetSampleModel ++java.awt.image.ByteLookupTable.getTable ++java.awt.image.ByteLookupTable.lookupPixel ++java.awt.image.ByteLookupTable.constructors ++java.awt.image.MultiPixelPackedSampleModel.getBitOffset ++java.awt.image.MultiPixelPackedSampleModel.getTransferType ++java.awt.image.MultiPixelPackedSampleModel.getDataElements ++java.awt.image.MultiPixelPackedSampleModel.getSample ++java.awt.image.MultiPixelPackedSampleModel.equals ++java.awt.image.MultiPixelPackedSampleModel.getPixel ++java.awt.image.MultiPixelPackedSampleModel.getSampleSize ++java.awt.image.MultiPixelPackedSampleModel.setDataElements ++java.awt.image.MultiPixelPackedSampleModel.constructors ++java.awt.image.MultiPixelPackedSampleModel.setPixel ++java.awt.image.MultiPixelPackedSampleModel.setSample ++java.awt.image.MultiPixelPackedSampleModel.createDataBuffer ++java.awt.image.MultiPixelPackedSampleModel.getPixelBitStride ++java.awt.image.MultiPixelPackedSampleModel.getOffset ++java.awt.image.MultiPixelPackedSampleModel.createCompatibleSampleModel ++java.awt.image.MultiPixelPackedSampleModel.hashCode ++java.awt.image.MultiPixelPackedSampleModel.getScanlineStride ++java.awt.image.MultiPixelPackedSampleModel.createSubsetSampleModel ++java.awt.image.AffineTransformOp.getPoint2D ++java.awt.image.AffineTransformOp.createCompatibleDestRaster ++java.awt.image.AffineTransformOp.constructors ++java.awt.image.AffineTransformOp.getBounds2D ++java.awt.image.PixelInterleavedSampleModel.createSubsetSampleModel ++java.awt.image.BandedSampleModel.setPixels ++java.awt.image.BandedSampleModel.getDataElements ++java.awt.image.BandedSampleModel.getSample ++java.awt.image.BandedSampleModel.getPixel ++java.awt.image.BandedSampleModel.getSamples ++java.awt.image.BandedSampleModel.getSampleFloat ++java.awt.image.BandedSampleModel.setDataElements ++java.awt.image.BandedSampleModel.constructors ++java.awt.image.BandedSampleModel.setPixel ++java.awt.image.BandedSampleModel.setSample 
++java.awt.image.BandedSampleModel.createDataBuffer ++java.awt.image.BandedSampleModel.getSampleDouble ++java.awt.image.BandedSampleModel.createCompatibleSampleModel ++java.awt.image.BandedSampleModel.setSamples ++java.awt.image.BandedSampleModel.hashCode ++java.awt.image.BandedSampleModel.getPixels ++java.awt.image.BandedSampleModel.createSubsetSampleModel ++java.awt.image.ColorConvertOp.filterRaster ++java.awt.image.ColorConvertOp.getPoint2D ++java.awt.image.ColorConvertOp.createCompatibleDestRaster ++java.awt.image.ColorConvertOp.constructors ++java.awt.image.ColorConvertOp.getBounds2D ++java.awt.ScrollPaneAdjustable.paramString ++java.awt.GridBagLayout.AdjustForGravity ++java.awt.GridBagLayout.toString ++java.awt.Dimension.clone ++java.awt.Dimension.equals ++java.awt.Dimension.setSize ++java.awt.Dimension.getSize ++java.awt.Dimension.constructors ++java.awt.TextField.getPreferredSize ++java.awt.TextField.getMinimumSize ++java.awt.TextField.constructors ++java.awt.BorderLayout.layoutContainer ++java.awt.BorderLayout.getHgap ++java.awt.BorderLayout.maxLayoutSize ++java.awt.BorderLayout.setVgap ++java.awt.BorderLayout.getVgap ++java.awt.BorderLayout.getLayoutAlignmentY ++java.awt.BorderLayout.Test15 ++java.awt.BorderLayout.getLayoutAlignmentX ++java.awt.BorderLayout.setHgap ++java.awt.BorderLayout.constructors ++java.awt.BorderLayout.constants ++java.awt.BorderLayout.preferredLayoutSize ++java.awt.BorderLayout.addLayoutComponent ++java.awt.Scrollbar.testSetBlockIncrement ++java.awt.Scrollbar.testSetUnitIncrement ++java.awt.Scrollbar.testSetValues ++java.awt.List.testSelected ++java.awt.ScrollPane.add ++java.awt.ScrollPane.getScrollPosition ++java.awt.ScrollPane.testSetLayout ++java.awt.ScrollPane.setScrollPosition ++java.awt.ScrollPane.doLayout ++java.awt.Choice.getSelected ++java.awt.Choice.remove ++java.awt.KeyboardFocusManager.getFocusOwner ++java.awt.KeyboardFocusManager.getGlobalPermanentFocusOwner ++java.awt.KeyboardFocusManager.getGlobalFocusOwner ++java.awt.AWTPermission.constructor ++java.awt.ColorClass.decode ++java.awt.ColorClass.serialization ++java.awt.ColorClass.equals ++java.awt.ColorClass.getGreen ++java.awt.ColorClass.constructors ++java.awt.ColorClass.brighter ++java.awt.ColorClass.hashCode ++java.awt.ColorClass.constants ++java.awt.ColorClass.getBlue ++java.awt.ColorClass.getRed ++java.awt.RenderingHints.Key.isCompatibleValue ++java.awt.RenderingHints.clone ++java.awt.RenderingHints.keySet ++java.awt.RenderingHints.put ++java.awt.RenderingHints.add ++java.awt.RenderingHints.putAll ++java.awt.RenderingHints.entrySet ++java.awt.RenderingHints.equals ++java.awt.RenderingHints.size ++java.awt.RenderingHints.get ++java.awt.RenderingHints.values ++java.awt.RenderingHints.containsValue ++java.awt.RenderingHints.isEmpty ++java.awt.RenderingHints.remove ++java.awt.RenderingHints.clear ++java.awt.Panel.TestPanelRepaint ++java.awt.AWTKeyStroke.serialization ++java.awt.AWTKeyStroke.equals ++java.awt.AWTKeyStroke.getAWTKeyStroke ++java.awt.Rectangle.clone ++java.awt.Rectangle.add ++java.awt.Rectangle.setRect ++java.awt.Rectangle.setBounds ++java.awt.Rectangle.equals ++java.awt.Rectangle.intersects ++java.awt.Rectangle.setSize ++java.awt.Rectangle.grow ++java.awt.Rectangle.outcode ++java.awt.Rectangle.union ++java.awt.Rectangle.constructors ++java.awt.Rectangle.intersection ++java.awt.Rectangle.setLocation ++java.awt.Rectangle.translate ++java.awt.Rectangle.contains ++java.awt.Rectangle.isEmpty ++java.awt.Menu.insert ++java.awt.AlphaComposite.getInstance 
++java.awt.AlphaComposite.equals ++java.awt.AlphaComposite.getInstance14 ++java.awt.AlphaComposite.getRule ++java.awt.Window.security ++java.awt.Window.focusCycleRootTest ++java.awt.EventClass.constants ++java.awt.Frame.isDisplayable1 ++java.awt.Frame.isDisplayable5 ++java.awt.Frame.isDisplayable4 ++java.awt.Frame.isDisplayable3 ++java.awt.Frame.isDisplayable6 ++java.awt.Frame.menubar ++java.awt.Frame.isDisplayable2 ++java.awt.Frame.isDisplayable7 ++java.awt.Container.getListeners ++java.awt.Container.setLayout ++java.awt.Container.getPreferredSize ++java.awt.Container.applyComponentOrientation ++java.awt.Container.getComponentAt ++java.awt.Container.getAlignmentY ++java.awt.Container.getAlignmentX ++java.awt.Container.addImpl ++java.awt.FontClass.serialization ++java.awt.MenuItem.label1 ++java.awt.font.TextHitInfo.isLeadingEdge ++java.awt.font.TextHitInfo.getOtherHit ++java.awt.font.TextHitInfo.leading ++java.awt.font.TextHitInfo.getOffsetHit ++java.awt.font.TextHitInfo.equals ++java.awt.font.TextHitInfo.getCharIndex ++java.awt.font.TextHitInfo.afterOffset ++java.awt.font.TextHitInfo.getInsertionIndex ++java.awt.font.TextHitInfo.toString ++java.awt.font.TextHitInfo.beforeOffset ++java.awt.font.TextHitInfo.hashCode ++java.awt.font.TextHitInfo.trailing ++java.awt.font.TransformAttribute.serialization ++java.awt.font.TransformAttribute.getTransform ++java.awt.font.TransformAttribute.isIdentity ++java.awt.font.ShapeGraphicAttribute.ShapeGraphicAttributeTest ++java.awt.font.ImageGraphicAttribute.ImageGraphicAttributeTest ++java.awt.font.TextAttribute.toString13 ++java.awt.font.TextAttribute.serialization ++java.awt.font.TextAttribute.toString ++java.awt.font.TextAttribute.constants13 ++java.awt.font.TextAttribute.constants ++java.awt.GradientPaint.getPoint2 ++java.awt.GradientPaint.equals ++java.awt.GradientPaint.getColor1 ++java.awt.GradientPaint.constructors ++java.awt.GradientPaint.getColor2 ++java.awt.GradientPaint.isCyclic ++java.awt.GradientPaint.getTransparency ++java.awt.GradientPaint.getPoint1 ++java.awt.TextComponent.setSelectionStart ++java.awt.Component.repaint ++java.awt.Component.getListeners ++java.awt.Component.setName ++java.awt.Component.properties14 ++java.awt.Component.getMaximumSize ++java.awt.Component.setMinimumSize ++java.awt.Component.getForeground ++java.awt.Component.setPreferredSize ++java.awt.Component.properties ++java.awt.Component.requestFocus ++java.awt.Component.isValid ++java.awt.Component.getFont ++java.awt.Component.setMaximumSize ++java.awt.Component.setComponentOrientation ++java.awt.datatransfer.DataFlavor.writeExternal ++java.awt.datatransfer.StringSelection.selection ++java.awt.TextArea.getPreferredSize ++java.awt.TextArea.getMinimumSize ++java.awt.TextArea.testReplaceText ++java.awt.TextArea.constructors ++java.awt.TextArea.testAppendText ++java.awt.TextArea.testInsertText ++java.awt.TextArea.testInvalidConstructorValues ++java.awt.color.ColorSpace.isCS_sRGB ++java.awt.color.ColorSpace.getInstance ++java.awt.FlowLayout.minimumLayoutSize ++java.awt.geom.Ellipse2D.Double.clone ++java.awt.geom.Ellipse2D.Double.getY ++java.awt.geom.Ellipse2D.Double.setFrame ++java.awt.geom.Ellipse2D.Double.getHeight ++java.awt.geom.Ellipse2D.Double.getX ++java.awt.geom.Ellipse2D.Double.constructors ++java.awt.geom.Ellipse2D.Double.getBounds2D ++java.awt.geom.Ellipse2D.Double.isEmpty ++java.awt.geom.Ellipse2D.Double.getWidth ++java.awt.geom.Ellipse2D.Float.clone ++java.awt.geom.Ellipse2D.Float.getY ++java.awt.geom.Ellipse2D.Float.setFrame 
++java.awt.geom.Ellipse2D.Float.getHeight ++java.awt.geom.Ellipse2D.Float.getX ++java.awt.geom.Ellipse2D.Float.constructors ++java.awt.geom.Ellipse2D.Float.getBounds2D ++java.awt.geom.Ellipse2D.Float.isEmpty ++java.awt.geom.Ellipse2D.Float.getWidth ++java.awt.geom.Ellipse2D.intersects ++java.awt.geom.Ellipse2D.contains ++java.awt.geom.GeneralPath.getCurrentPoint ++java.awt.geom.GeneralPath.GeneralPath ++java.awt.geom.GeneralPath.append_PathIterator ++java.awt.geom.GeneralPath.contains ++java.awt.geom.GeneralPath.getPathIterator ++java.awt.geom.RectangularShape.getFrame ++java.awt.geom.RectangularShape.setFrame ++java.awt.geom.RectangularShape.getMinY ++java.awt.geom.RectangularShape.intersects ++java.awt.geom.RectangularShape.getMaxY ++java.awt.geom.RectangularShape.getMaxX ++java.awt.geom.RectangularShape.setFrameFromCenter ++java.awt.geom.RectangularShape.setFrameFromDiagonal ++java.awt.geom.RectangularShape.getMinX ++java.awt.geom.RectangularShape.getCenterX ++java.awt.geom.RectangularShape.getCenterY ++java.awt.geom.RectangularShape.contains ++java.awt.geom.RectangularShape.isEmpty ++java.awt.geom.RectangularShape.getBounds ++java.awt.geom.Line2D.clone ++java.awt.geom.Line2D.getP1 ++java.awt.geom.Line2D.linesIntersect ++java.awt.geom.Line2D.ptSegDistSq ++java.awt.geom.Line2D.relativeCCW ++java.awt.geom.Line2D.getP2 ++java.awt.geom.Line2D.setLine ++java.awt.geom.Line2D.equals ++java.awt.geom.Line2D.intersects ++java.awt.geom.Line2D.ptLineDistSq ++java.awt.geom.Line2D.ptSegDist ++java.awt.geom.Line2D.intersectsLine ++java.awt.geom.Line2D.contains ++java.awt.geom.Line2D.ptLineDist ++java.awt.geom.Line2D.getPathIterator ++java.awt.geom.Line2D.getBounds ++java.awt.geom.Area.clone ++java.awt.geom.Area.add ++java.awt.geom.Area.isRectangular ++java.awt.geom.Area.createTransformedArea ++java.awt.geom.Area.isPolygonal ++java.awt.geom.Area.intersect ++java.awt.geom.Area.equals ++java.awt.geom.Area.exclusiveOr ++java.awt.geom.Area.intersects ++java.awt.geom.Area.reset ++java.awt.geom.Area.isSingular ++java.awt.geom.Area.constructors ++java.awt.geom.Area.getBounds2D ++java.awt.geom.Area.contains ++java.awt.geom.Area.subtract ++java.awt.geom.Area.isEmpty ++java.awt.geom.Area.transform ++java.awt.geom.Area.getBounds ++java.awt.geom.Rectangle2D.Double.clone ++java.awt.geom.Rectangle2D.Double.createIntersection ++java.awt.geom.Rectangle2D.Double.setRect ++java.awt.geom.Rectangle2D.Double.outcode ++java.awt.geom.Rectangle2D.Double.createUnion ++java.awt.geom.Rectangle2D.Double.isEmpty ++java.awt.geom.Rectangle2D.Float.clone ++java.awt.geom.Rectangle2D.Float.createIntersection ++java.awt.geom.Rectangle2D.Float.setRect ++java.awt.geom.Rectangle2D.Float.outcode ++java.awt.geom.Rectangle2D.Float.createUnion ++java.awt.geom.Rectangle2D.Float.isEmpty ++java.awt.geom.Rectangle2D.add ++java.awt.geom.Rectangle2D.setFrame ++java.awt.geom.Rectangle2D.intersect ++java.awt.geom.Rectangle2D.equals ++java.awt.geom.Rectangle2D.intersects ++java.awt.geom.Rectangle2D.union ++java.awt.geom.Rectangle2D.intersectsLine ++java.awt.geom.Rectangle2D.getBounds2D ++java.awt.geom.Rectangle2D.contains ++java.awt.geom.Rectangle2D.constants ++java.awt.geom.Rectangle2D.getPathIterator ++java.awt.geom.Rectangle2D.getBounds ++java.awt.geom.Arc2D.Double.clone ++java.awt.geom.Arc2D.Float.clone ++java.awt.geom.Arc2D.setFrame ++java.awt.geom.Arc2D.setAngleStart ++java.awt.geom.Arc2D.setArc ++java.awt.geom.Arc2D.setArcByTangent ++java.awt.geom.Arc2D.containsAngle ++java.awt.geom.Arc2D.setArcByCenter ++java.awt.geom.Arc2D.intersects 
++java.awt.geom.Arc2D.getEndPoint ++java.awt.geom.Arc2D.setAngleExtent ++java.awt.geom.Arc2D.constructors ++java.awt.geom.Arc2D.getBounds2D ++java.awt.geom.Arc2D.contains ++java.awt.geom.Arc2D.getStartPoint ++java.awt.geom.Arc2D.constants ++java.awt.geom.Arc2D.isEmpty ++java.awt.geom.Arc2D.getPathIterator ++java.awt.geom.Arc2D.setAngles ++java.awt.geom.Arc2D.setArcType ++java.awt.geom.QuadCurve2D.Double.getP1 ++java.awt.geom.QuadCurve2D.Double.getP2 ++java.awt.geom.QuadCurve2D.Double.getCtrlPt ++java.awt.geom.QuadCurve2D.Double.getBounds2D ++java.awt.geom.QuadCurve2D.Double.Double ++java.awt.geom.QuadCurve2D.Double.setCurve ++java.awt.geom.QuadCurve2D.Float.getP1 ++java.awt.geom.QuadCurve2D.Float.Float ++java.awt.geom.QuadCurve2D.Float.getP2 ++java.awt.geom.QuadCurve2D.Float.getCtrlPt ++java.awt.geom.QuadCurve2D.Float.getBounds2D ++java.awt.geom.QuadCurve2D.Float.setCurve ++java.awt.geom.QuadCurve2D.clone ++java.awt.geom.QuadCurve2D.getFlatness ++java.awt.geom.QuadCurve2D.subdivide ++java.awt.geom.QuadCurve2D.getFlatnessSq ++java.awt.geom.QuadCurve2D.setCurve ++java.awt.geom.QuadCurve2D.getPathIterator ++java.awt.geom.CubicCurve2D.Double.getP1 ++java.awt.geom.CubicCurve2D.Double.getCtrlP2 ++java.awt.geom.CubicCurve2D.Double.getP2 ++java.awt.geom.CubicCurve2D.Double.getBounds2D ++java.awt.geom.CubicCurve2D.Double.Double ++java.awt.geom.CubicCurve2D.Double.setCurve ++java.awt.geom.CubicCurve2D.Double.getCtrlP1 ++java.awt.geom.CubicCurve2D.Float.getP1 ++java.awt.geom.CubicCurve2D.Float.getCtrlP2 ++java.awt.geom.CubicCurve2D.Float.Float ++java.awt.geom.CubicCurve2D.Float.getP2 ++java.awt.geom.CubicCurve2D.Float.getBounds2D ++java.awt.geom.CubicCurve2D.Float.setCurve ++java.awt.geom.CubicCurve2D.Float.getCtrlP1 ++java.awt.geom.CubicCurve2D.clone ++java.awt.geom.CubicCurve2D.getFlatness ++java.awt.geom.CubicCurve2D.subdivide ++java.awt.geom.CubicCurve2D.getFlatnessSq ++java.awt.geom.CubicCurve2D.setCurve ++java.awt.geom.CubicCurve2D.getPathIterator ++java.awt.geom.FlatteningPathIterator.FlatteningPathIterator ++java.awt.geom.FlatteningPathIterator.getWindingRule ++java.awt.geom.FlatteningPathIterator.getFlatness ++java.awt.geom.FlatteningPathIterator.getRecursionLimit ++java.awt.geom.FlatteningPathIterator.currentSegment ++java.awt.geom.AffineTransform.clone ++java.awt.geom.AffineTransform.preConcatenate ++java.awt.geom.AffineTransform.getRotateInstance ++java.awt.geom.AffineTransform.getMatrix ++java.awt.geom.AffineTransform.inverseTransform ++java.awt.geom.AffineTransform.getScaleInstance ++java.awt.geom.AffineTransform.createInverse ++java.awt.geom.AffineTransform.deltaTransform ++java.awt.geom.AffineTransform.equals ++java.awt.geom.AffineTransform.setTransform ++java.awt.geom.AffineTransform.constructors ++java.awt.geom.AffineTransform.getTranslateInstance ++java.awt.geom.AffineTransform.constants ++java.awt.geom.AffineTransform.concatenate ++java.awt.geom.AffineTransform.getShearInstance ++java.awt.geom.AffineTransform.transform ++java.awt.geom.AffineTransform.getDeterminant ++java.awt.geom.AffineTransform.isIdentity ++java.awt.geom.RoundRectangle2D.intersects ++java.awt.geom.RoundRectangle2D.contains ++java.awt.testName ++java.nio.Buffer.IntBufferTest ++java.nio.Buffer.ShortBufferTest ++java.nio.Buffer.FloatBufferTest ++java.nio.Buffer.LongBufferTest ++java.nio.Buffer.ByteBufferTest ++java.nio.Buffer.DoubleBufferTest ++java.nio.Buffer.CharBufferTest ++java.nio.LongBuffer.compact ++java.nio.ShortBuffer.compact ++java.nio.IntBuffer.compareTo ++java.nio.IntBuffer.compact 
++java.nio.channels.Channels.ChannelsTest ++java.nio.channels.Selector.testEmptySelect ++java.nio.channels.FileChannel.copyIO ++java.nio.channels.FileChannel.multibufferIO ++java.nio.channels.FileChannel.truncate ++java.nio.channels.FileChannel.map ++java.nio.channels.FileChannel.offsetSingleBuffer ++java.nio.channels.FileChannel.singlebufferIO ++java.nio.channels.FileChannel.multidirectbufferIO ++java.nio.channels.FileChannel.offsetSingleDirectBuffer ++java.nio.DoubleBuffer.compareTo ++java.nio.DoubleBuffer.compact ++java.nio.ByteBuffer.direct ++java.nio.ByteBuffer.compact ++java.nio.ByteBuffer.TestAllocateDirect ++java.nio.ByteBuffer.GetPut ++java.nio.ByteBuffer.putDouble ++java.nio.ByteBuffer.Order ++java.nio.ByteBuffer.Allocating ++java.nio.CharBuffer.compact ++java.nio.FloatBuffer.compareTo ++java.nio.FloatBuffer.compact ++java.nio.charset.Charset.UTF8Charset ++java.nio.charset.Charset.utf16 ++java.nio.charset.Charset.canEncode ++java.nio.charset.Charset.forName diff --cc debian/nss.cfg index 000000000,000000000..3535a4001 new file mode 100644 --- /dev/null +++ b/debian/nss.cfg @@@ -1,0 -1,0 +1,4 @@@ ++name = NSS ++nssDbMode = noDb ++attributes = compatibility ++handleStartupErrors = ignoreMultipleInitialisation diff --cc debian/patches/8199220.diff index 000000000,000000000..342c23ade new file mode 100644 --- /dev/null +++ b/debian/patches/8199220.diff @@@ -1,0 -1,0 +1,19 @@@ ++# HG changeset patch ++# User enevill ++# Date 1520877434 0 ++# Mon Mar 12 17:57:14 2018 +0000 ++# Node ID 81265b23c488c7fd03ebaee04d1802fc0ada737e ++# Parent ecd91135d645bed6620abbd76ce30a906b6d7d44 ++8199220: Zero build broken after 8195103, 8191102 and 8189871 ++Reviewed-by: dholmes, stuefe, eosterlund, glaubitz, coleenp ++ ++--- a/src/hotspot/share/utilities/debug.hpp +++++ b/src/hotspot/share/utilities/debug.hpp ++@@ -185,6 +185,7 @@ ++ // use of an array type. ++ ++ template<bool x> struct STATIC_ASSERT_FAILURE; +++template<> struct STATIC_ASSERT_FAILURE<false> { enum { value = 0 }; }; ++ template<> struct STATIC_ASSERT_FAILURE<true> { enum { value = 1 }; }; ++ ++ #define STATIC_ASSERT(Cond) \ diff --cc debian/patches/8276799.diff index 000000000,000000000..47b102b47 new file mode 100644 --- /dev/null +++ b/debian/patches/8276799.diff @@@ -1,0 -1,0 +1,61916 @@@ ++--- a/make/autoconf/build-aux/config.sub +++++ b/make/autoconf/build-aux/config.sub ++@@ -1,6 +1,6 @@ ++ #!/bin/sh ++ # ++-# Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved. +++# Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. ++ # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ # ++ # This code is free software; you can redistribute it and/or modify it ++@@ -46,6 +46,13 @@ if echo $* | grep pc-msys >/dev/null ; t ++ exit ++ fi ++ +++# Canonicalize for riscv which autoconf-config.sub doesn't handle +++if echo $* | grep '^riscv\(32\|64\)-linux' >/dev/null ; then +++ result=`echo $@ | sed 's/linux/unknown-linux/'` +++ echo $result +++ exit +++fi +++ ++ # Filter out everything that doesn't begin with "aarch64-" ++ if ! echo $* | grep '^aarch64-' >/dev/null ; then ++ . 
$DIR/autoconf-config.sub "$@" ++@@ -78,4 +85,3 @@ result=`echo $result | sed "s/^arm-/aarc ++ ++ echo $result ++ exit $exitcode ++- ++--- a/make/autoconf/jvm-features.m4 +++++ b/make/autoconf/jvm-features.m4 ++@@ -311,7 +311,8 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_SHENAN ++ AC_MSG_CHECKING([if platform is supported by Shenandoah]) ++ if test "x$OPENJDK_TARGET_CPU_ARCH" = "xx86" || \ ++ test "x$OPENJDK_TARGET_CPU" = "xaarch64" || \ ++- test "x$OPENJDK_TARGET_CPU" = "xppc64le"; then +++ test "x$OPENJDK_TARGET_CPU" = "xppc64le" || \ +++ test "x$OPENJDK_TARGET_CPU" = "xriscv64"; then ++ AC_MSG_RESULT([yes]) ++ else ++ AC_MSG_RESULT([no, $OPENJDK_TARGET_CPU]) ++@@ -361,7 +362,8 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_ZGC], ++ AC_MSG_RESULT([no, $OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU]) ++ AVAILABLE=false ++ fi ++- elif test "x$OPENJDK_TARGET_CPU" = "xppc64le"; then +++ elif test "x$OPENJDK_TARGET_CPU" = "xppc64le" || \ +++ test "x$OPENJDK_TARGET_CPU" = "xriscv64"; then ++ if test "x$OPENJDK_TARGET_OS" = "xlinux"; then ++ AC_MSG_RESULT([yes]) ++ else ++--- a/make/autoconf/libraries.m4 +++++ b/make/autoconf/libraries.m4 ++@@ -1,5 +1,5 @@ ++ # ++-# Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved. +++# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved. ++ # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ # ++ # This code is free software; you can redistribute it and/or modify it ++@@ -146,6 +146,12 @@ AC_DEFUN_ONCE([LIB_SETUP_LIBRARIES], ++ fi ++ fi ++ +++ # Because RISC-V only has word-sized atomics, it requires libatomic where +++ # other common architectures do not. So link libatomic by default. +++ if test "x$OPENJDK_TARGET_OS" = xlinux && test "x$OPENJDK_TARGET_CPU" = xriscv64; then +++ BASIC_JVM_LIBS="$BASIC_JVM_LIBS -latomic" +++ fi +++ ++ # perfstat lib ++ if test "x$OPENJDK_TARGET_OS" = xaix; then ++ BASIC_JVM_LIBS="$BASIC_JVM_LIBS -lperfstat" ++--- a/make/autoconf/platform.m4 +++++ b/make/autoconf/platform.m4 ++@@ -1,5 +1,5 @@ ++ # ++-# Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved. +++# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved. ++ # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ # ++ # This code is free software; you can redistribute it and/or modify it ++@@ -565,6 +565,8 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS_HEL ++ HOTSPOT_$1_CPU_DEFINE=PPC64 ++ elif test "x$OPENJDK_$1_CPU" = xppc64le; then ++ HOTSPOT_$1_CPU_DEFINE=PPC64 +++ elif test "x$OPENJDK_$1_CPU" = xriscv64; then +++ HOTSPOT_$1_CPU_DEFINE=RISCV64 ++ ++ # The cpu defines below are for zero, we don't support them directly. ++ elif test "x$OPENJDK_$1_CPU" = xsparc; then ++@@ -575,8 +577,6 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS_HEL ++ HOTSPOT_$1_CPU_DEFINE=S390 ++ elif test "x$OPENJDK_$1_CPU" = xs390x; then ++ HOTSPOT_$1_CPU_DEFINE=S390 ++- elif test "x$OPENJDK_$1_CPU" = xriscv64; then ++- HOTSPOT_$1_CPU_DEFINE=RISCV ++ elif test "x$OPENJDK_$1_CPU" != x; then ++ HOTSPOT_$1_CPU_DEFINE=$(echo $OPENJDK_$1_CPU | tr a-z A-Z) ++ fi ++--- a/make/hotspot/gensrc/GensrcAdlc.gmk +++++ b/make/hotspot/gensrc/GensrcAdlc.gmk ++@@ -1,5 +1,5 @@ ++ # ++-# Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved. +++# Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved. ++ # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ # ++ # This code is free software; you can redistribute it and/or modify it ++@@ -149,6 +149,13 @@ ifeq ($(call check-jvm-feature, compiler ++ ))) ++ endif ++ +++ ifeq ($(HOTSPOT_TARGET_CPU_ARCH), riscv) +++ AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \ +++ $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/$(HOTSPOT_TARGET_CPU_ARCH)_v.ad \ +++ $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/$(HOTSPOT_TARGET_CPU_ARCH)_b.ad \ +++ ))) +++ endif +++ ++ ifeq ($(call check-jvm-feature, shenandoahgc), true) ++ AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \ ++ $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/shenandoah/shenandoah_$(HOTSPOT_TARGET_CPU).ad \ ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/abstractInterpreter_riscv.cpp ++@@ -0,0 +1,177 @@ +++/* +++ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "interpreter/interpreter.hpp" +++#include "oops/constMethod.hpp" +++#include "oops/klass.inline.hpp" +++#include "oops/method.hpp" +++#include "runtime/frame.inline.hpp" +++#include "utilities/align.hpp" +++#include "utilities/debug.hpp" +++#include "utilities/macros.hpp" +++ +++int AbstractInterpreter::BasicType_as_index(BasicType type) { +++ int i = 0; +++ switch (type) { +++ case T_BOOLEAN: i = 0; break; +++ case T_CHAR : i = 1; break; +++ case T_BYTE : i = 2; break; +++ case T_SHORT : i = 3; break; +++ case T_INT : i = 4; break; +++ case T_LONG : i = 5; break; +++ case T_VOID : i = 6; break; +++ case T_FLOAT : i = 7; break; +++ case T_DOUBLE : i = 8; break; +++ case T_OBJECT : i = 9; break; +++ case T_ARRAY : i = 9; break; +++ default : ShouldNotReachHere(); +++ } +++ assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, +++ "index out of bounds"); +++ return i; +++} +++ +++// How much stack a method activation needs in words. +++int AbstractInterpreter::size_top_interpreter_activation(Method* method) { +++ const int entry_size = frame::interpreter_frame_monitor_size(); +++ +++ // total overhead size: entry_size + (saved fp thru expr stack +++ // bottom). 
be sure to change this if you add/subtract anything +++ // to/from the overhead area +++ const int overhead_size = +++ -(frame::interpreter_frame_initial_sp_offset) + entry_size; +++ +++ const int stub_code = frame::entry_frame_after_call_words; +++ assert_cond(method != NULL); +++ const int method_stack = (method->max_locals() + method->max_stack()) * +++ Interpreter::stackElementWords; +++ return (overhead_size + method_stack + stub_code); +++} +++ +++// asm based interpreter deoptimization helpers +++int AbstractInterpreter::size_activation(int max_stack, +++ int temps, +++ int extra_args, +++ int monitors, +++ int callee_params, +++ int callee_locals, +++ bool is_top_frame) { +++ // Note: This calculation must exactly parallel the frame setup +++ // in TemplateInterpreterGenerator::generate_method_entry. +++ +++ // fixed size of an interpreter frame: +++ int overhead = frame::sender_sp_offset - +++ frame::interpreter_frame_initial_sp_offset; +++ // Our locals were accounted for by the caller (or last_frame_adjust +++ // on the transition) Since the callee parameters already account +++ // for the callee's params we only need to account for the extra +++ // locals. +++ int size = overhead + +++ (callee_locals - callee_params) + +++ monitors * frame::interpreter_frame_monitor_size() + +++ // On the top frame, at all times SP <= ESP, and SP is +++ // 16-aligned. We ensure this by adjusting SP on method +++ // entry and re-entry to allow room for the maximum size of +++ // the expression stack. When we call another method we bump +++ // SP so that no stack space is wasted. So, only on the top +++ // frame do we need to allow max_stack words. +++ (is_top_frame ? max_stack : temps + extra_args); +++ +++ // On riscv we always keep the stack pointer 16-aligned, so we +++ // must round up here. +++ size = align_up(size, 2); +++ +++ return size; +++} +++ +++void AbstractInterpreter::layout_activation(Method* method, +++ int tempcount, +++ int popframe_extra_args, +++ int moncount, +++ int caller_actual_parameters, +++ int callee_param_count, +++ int callee_locals, +++ frame* caller, +++ frame* interpreter_frame, +++ bool is_top_frame, +++ bool is_bottom_frame) { +++ // The frame interpreter_frame is guaranteed to be the right size, +++ // as determined by a previous call to the size_activation() method.
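The even-word rounding at the end of size_activation() above is what keeps the RISC-V stack pointer 16-byte aligned. A minimal freestanding sketch of that rounding, assuming HotSpot's align_up has its usual round-up-to-a-power-of-two-multiple semantics (illustration only, not part of the patch):

    #include <cassert>

    // Round size (in 8-byte stack words) up to a multiple of alignment words;
    // alignment = 2 words makes every frame an even number of words, i.e.
    // a multiple of 16 bytes.
    static int align_up(int size, int alignment) {
      assert((alignment & (alignment - 1)) == 0); // power of two only
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      assert(align_up(13, 2) == 14); // odd word count rounds up
      assert(align_up(14, 2) == 14); // even word count is unchanged
      return 0;
    }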
+++ // It is also guaranteed to be walkable even though it is in a +++ // skeletal state +++ assert_cond(method != NULL && caller != NULL && interpreter_frame != NULL); +++ int max_locals = method->max_locals() * Interpreter::stackElementWords; +++ int extra_locals = (method->max_locals() - method->size_of_parameters()) * +++ Interpreter::stackElementWords; +++ +++#ifdef ASSERT +++ assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable"); +++#endif +++ +++ interpreter_frame->interpreter_frame_set_method(method); +++ // NOTE the difference in using sender_sp and interpreter_frame_sender_sp +++ // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp) +++ // and sender_sp is fp +++ intptr_t* locals = NULL; +++ if (caller->is_interpreted_frame()) { +++ locals = caller->interpreter_frame_last_sp() + caller_actual_parameters - 1; +++ } else { +++ locals = interpreter_frame->sender_sp() + max_locals - 1; +++ } +++ +++#ifdef ASSERT +++ if (caller->is_interpreted_frame()) { +++ assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement"); +++ } +++#endif +++ +++ interpreter_frame->interpreter_frame_set_locals(locals); +++ BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin(); +++ BasicObjectLock* monbot = montop - moncount; +++ interpreter_frame->interpreter_frame_set_monitor_end(monbot); +++ +++ // Set last_sp +++ intptr_t* last_sp = (intptr_t*) monbot - +++ tempcount*Interpreter::stackElementWords - +++ popframe_extra_args; +++ interpreter_frame->interpreter_frame_set_last_sp(last_sp); +++ +++ // All frames but the initial (oldest) interpreter frame we fill in have +++ // a value for sender_sp that allows walking the stack but isn't +++ // truly correct. Correct the value here. +++ if (extra_locals != 0 && +++ interpreter_frame->sender_sp() == +++ interpreter_frame->interpreter_frame_sender_sp()) { +++ interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + +++ extra_locals); +++ } +++ +++ *interpreter_frame->interpreter_frame_cache_addr() = +++ method->constants()->cache(); +++ *interpreter_frame->interpreter_frame_mirror_addr() = +++ method->method_holder()->java_mirror(); +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/assembler_riscv.cpp ++@@ -0,0 +1,78 @@ +++/* +++ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include <stdio.h> +++#include <sys/types.h> +++ +++#include "precompiled.hpp" +++#include "asm/assembler.hpp" +++#include "asm/assembler.inline.hpp" +++#include "compiler/disassembler.hpp" +++#include "interpreter/interpreter.hpp" +++#include "memory/resourceArea.hpp" +++#include "runtime/interfaceSupport.inline.hpp" +++#include "runtime/sharedRuntime.hpp" +++ +++int AbstractAssembler::code_fill_byte() { +++ return 0; +++} +++ +++Address::Address(address target, relocInfo::relocType rtype) : _base(noreg), _offset(0), _mode(literal) { +++ _target = target; +++ switch (rtype) { +++ case relocInfo::oop_type: +++ case relocInfo::metadata_type: +++ // Oops are a special case. Normally they would be their own section +++ // but in cases like icBuffer they are literals in the code stream that +++ // we don't have a section for. We use none so that we get a literal address +++ // which is always patchable. +++ break; +++ case relocInfo::external_word_type: +++ _rspec = external_word_Relocation::spec(target); +++ break; +++ case relocInfo::internal_word_type: +++ _rspec = internal_word_Relocation::spec(target); +++ break; +++ case relocInfo::opt_virtual_call_type: +++ _rspec = opt_virtual_call_Relocation::spec(); +++ break; +++ case relocInfo::static_call_type: +++ _rspec = static_call_Relocation::spec(); +++ break; +++ case relocInfo::runtime_call_type: +++ _rspec = runtime_call_Relocation::spec(); +++ break; +++ case relocInfo::poll_type: +++ case relocInfo::poll_return_type: +++ _rspec = Relocation::spec_simple(rtype); +++ break; +++ case relocInfo::none: +++ _rspec = RelocationHolder::none; +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/assembler_riscv.hpp ++@@ -0,0 +1,2764 @@@ +++/* +++ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions.
+++ * +++ */ +++ +++#ifndef CPU_RISCV_ASSEMBLER_RISCV_HPP +++#define CPU_RISCV_ASSEMBLER_RISCV_HPP +++ +++#include "asm/register.hpp" +++#include "code/codeCache.hpp" +++#include "metaprogramming/enableIf.hpp" +++ +++#define XLEN 64 +++ +++// definitions of various symbolic names for machine registers +++ +++// First intercalls between C and Java which use 8 general registers +++// and 8 floating registers +++ +++class Argument { +++ public: +++ enum { +++ n_int_register_parameters_c = 8, // x10, x11, ... x17 (c_rarg0, c_rarg1, ...) +++ n_float_register_parameters_c = 8, // f10, f11, ... f17 (c_farg0, c_farg1, ... ) +++ +++ n_int_register_parameters_j = 8, // x11, ... x17, x10 (j_rarg0, j_rarg1, ...) +++ n_float_register_parameters_j = 8 // f10, f11, ... f17 (j_farg0, j_farg1, ...) +++ }; +++}; +++ +++// function argument(caller-save registers) +++REGISTER_DECLARATION(Register, c_rarg0, x10); +++REGISTER_DECLARATION(Register, c_rarg1, x11); +++REGISTER_DECLARATION(Register, c_rarg2, x12); +++REGISTER_DECLARATION(Register, c_rarg3, x13); +++REGISTER_DECLARATION(Register, c_rarg4, x14); +++REGISTER_DECLARATION(Register, c_rarg5, x15); +++REGISTER_DECLARATION(Register, c_rarg6, x16); +++REGISTER_DECLARATION(Register, c_rarg7, x17); +++ +++REGISTER_DECLARATION(FloatRegister, c_farg0, f10); +++REGISTER_DECLARATION(FloatRegister, c_farg1, f11); +++REGISTER_DECLARATION(FloatRegister, c_farg2, f12); +++REGISTER_DECLARATION(FloatRegister, c_farg3, f13); +++REGISTER_DECLARATION(FloatRegister, c_farg4, f14); +++REGISTER_DECLARATION(FloatRegister, c_farg5, f15); +++REGISTER_DECLARATION(FloatRegister, c_farg6, f16); +++REGISTER_DECLARATION(FloatRegister, c_farg7, f17); +++ +++// Symbolically name the register arguments used by the Java calling convention. +++// We have control over the convention for java so we can do what we please. +++// What pleases us is to offset the java calling convention so that when +++// we call a suitable jni method the arguments are lined up and we don't +++// have to do much shuffling. A suitable jni method is non-static and a +++// small number of arguments. 
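A minimal sketch of the rotation that the register table and declarations just below implement (hypothetical freestanding C++, not part of the patch; only the index mapping is taken from the declarations): for a non-static JNI method the C convention receives the JNIEnv* in c_rarg0 and the receiver object in c_rarg1, so holding Java argument i in C argument register i + 1 leaves everything in place and only the JNIEnv* needs to be materialized before the call.

    #include <cassert>

    // Hypothetical helper: map a Java argument-register index (j_rarg0..7)
    // to the C argument-register index (c_rarg0..7) it is declared as.
    static int j_rarg_to_c_rarg(int i) {
      return (i + 1) % 8; // j_rarg0 -> c_rarg1, ..., j_rarg7 wraps to c_rarg0
    }

    int main() {
      assert(j_rarg_to_c_rarg(0) == 1); // receiver already sits in c_rarg1 (x11)
      assert(j_rarg_to_c_rarg(7) == 0); // j_rarg7 reuses c_rarg0 (x10)
      return 0;
    }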
+++// +++// |------------------------------------------------------------------------| +++// | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 c_rarg6 c_rarg7 | +++// |------------------------------------------------------------------------| +++// | x10 x11 x12 x13 x14 x15 x16 x17 | +++// |------------------------------------------------------------------------| +++// | j_rarg7 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 j_rarg5 j_rarg6 | +++// |------------------------------------------------------------------------| +++ +++REGISTER_DECLARATION(Register, j_rarg0, c_rarg1); +++REGISTER_DECLARATION(Register, j_rarg1, c_rarg2); +++REGISTER_DECLARATION(Register, j_rarg2, c_rarg3); +++REGISTER_DECLARATION(Register, j_rarg3, c_rarg4); +++REGISTER_DECLARATION(Register, j_rarg4, c_rarg5); +++REGISTER_DECLARATION(Register, j_rarg5, c_rarg6); +++REGISTER_DECLARATION(Register, j_rarg6, c_rarg7); +++REGISTER_DECLARATION(Register, j_rarg7, c_rarg0); +++ +++// Java floating args are passed as per C +++ +++REGISTER_DECLARATION(FloatRegister, j_farg0, f10); +++REGISTER_DECLARATION(FloatRegister, j_farg1, f11); +++REGISTER_DECLARATION(FloatRegister, j_farg2, f12); +++REGISTER_DECLARATION(FloatRegister, j_farg3, f13); +++REGISTER_DECLARATION(FloatRegister, j_farg4, f14); +++REGISTER_DECLARATION(FloatRegister, j_farg5, f15); +++REGISTER_DECLARATION(FloatRegister, j_farg6, f16); +++REGISTER_DECLARATION(FloatRegister, j_farg7, f17); +++ +++// zero register +++REGISTER_DECLARATION(Register, zr, x0); +++// global pointer +++REGISTER_DECLARATION(Register, gp, x3); +++// thread pointer +++REGISTER_DECLARATION(Register, tp, x4); +++ +++// registers used to hold VM data either temporarily within a method +++// or across method calls +++ +++// volatile (caller-save) registers +++ +++// current method -- must be in a call-clobbered register +++REGISTER_DECLARATION(Register, xmethod, x31); +++// return address +++REGISTER_DECLARATION(Register, ra, x1); +++ +++// non-volatile (callee-save) registers +++ +++// stack pointer +++REGISTER_DECLARATION(Register, sp, x2); +++// frame pointer +++REGISTER_DECLARATION(Register, fp, x8); +++// base of heap +++REGISTER_DECLARATION(Register, xheapbase, x27); +++// constant pool cache +++REGISTER_DECLARATION(Register, xcpool, x26); +++// monitors allocated on stack +++REGISTER_DECLARATION(Register, xmonitors, x25); +++// locals on stack +++REGISTER_DECLARATION(Register, xlocals, x24); +++ +++// java thread pointer +++REGISTER_DECLARATION(Register, xthread, x23); +++// bytecode pointer +++REGISTER_DECLARATION(Register, xbcp, x22); +++// Dispatch table base +++REGISTER_DECLARATION(Register, xdispatch, x21); +++// Java stack pointer +++REGISTER_DECLARATION(Register, esp, x20); +++ +++// temporary register(caller-save registers) +++REGISTER_DECLARATION(Register, t0, x5); +++REGISTER_DECLARATION(Register, t1, x6); +++REGISTER_DECLARATION(Register, t2, x7); +++ +++const Register g_INTArgReg[Argument::n_int_register_parameters_c] = { +++ c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7 +++}; +++ +++const FloatRegister g_FPArgReg[Argument::n_float_register_parameters_c] = { +++ c_farg0, c_farg1, c_farg2, c_farg3, c_farg4, c_farg5, c_farg6, c_farg7 +++}; +++ +++#define assert_cond(ARG1) assert(ARG1, #ARG1) +++ +++// Addressing modes +++class Address { +++ public: +++ +++ enum mode { no_mode, base_plus_offset, pcrel, literal }; +++ +++ private: +++ Register _base; +++ Register _index; +++ int64_t _offset; +++ enum mode _mode; +++ +++ RelocationHolder _rspec; +++ +++ // If the
target is far we'll need to load the ea of this to a +++ // register to reach it. Otherwise if near we can do PC-relative +++ // addressing. +++ address _target; +++ +++ public: +++ Address() +++ : _base(noreg), _index(noreg), _offset(0), _mode(no_mode), _target(NULL) { } +++ +++ Address(Register r) +++ : _base(r), _index(noreg), _offset(0), _mode(base_plus_offset), _target(NULL) { } +++ +++ template<typename T, ENABLE_IF(std::is_integral<T>::value)> +++ Address(Register r, T o) +++ : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(NULL) {} +++ +++ Address(Register r, ByteSize disp) +++ : Address(r, in_bytes(disp)) {} +++ +++ Address(address target, RelocationHolder const& rspec) +++ : _base(noreg), +++ _index(noreg), +++ _offset(0), +++ _mode(literal), +++ _rspec(rspec), +++ _target(target) { } +++ +++ Address(address target, relocInfo::relocType rtype = relocInfo::external_word_type); +++ +++ const Register base() const { +++ guarantee((_mode == base_plus_offset | _mode == pcrel | _mode == literal), "wrong mode"); +++ return _base; +++ } +++ long offset() const { +++ return _offset; +++ } +++ Register index() const { +++ return _index; +++ } +++ mode getMode() const { +++ return _mode; +++ } +++ +++ bool uses(Register reg) const { return _base == reg; } +++ const address target() const { return _target; } +++ const RelocationHolder& rspec() const { return _rspec; } +++ ~Address() { +++ _target = NULL; +++ _base = NULL; +++ } +++}; +++ +++// Convenience classes +++class RuntimeAddress: public Address { +++ +++ public: +++ +++ RuntimeAddress(address target) : Address(target, relocInfo::runtime_call_type) {} +++ ~RuntimeAddress() {} +++}; +++ +++class OopAddress: public Address { +++ +++ public: +++ +++ OopAddress(address target) : Address(target, relocInfo::oop_type) {} +++ ~OopAddress() {} +++}; +++ +++class ExternalAddress: public Address { +++ private: +++ static relocInfo::relocType reloc_for_target(address target) { +++ // Sometimes ExternalAddress is used for values which aren't +++ // exactly addresses, like the card table base. +++ // external_word_type can't be used for values in the first page +++ // so just skip the reloc in that case. +++ return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none; +++ } +++ +++ public: +++ +++ ExternalAddress(address target) : Address(target, reloc_for_target(target)) {} +++ ~ExternalAddress() {} +++}; +++ +++class InternalAddress: public Address { +++ +++ public: +++ +++ InternalAddress(address target) : Address(target, relocInfo::internal_word_type) {} +++ ~InternalAddress() {} +++}; +++ +++class Assembler : public AbstractAssembler { +++public: +++ +++ enum { +++ instruction_size = 4, +++ compressed_instruction_size = 2, +++ }; +++ +++ // instruction must start at passed address +++ static bool is_compressed_instr(address instr) { +++ // The RISC-V ISA Manual, Section 'Base Instruction-Length Encoding': +++ // Instructions are stored in memory as a sequence of 16-bit little-endian parcels, regardless of +++ // memory system endianness. Parcels forming one instruction are stored at increasing halfword +++ // addresses, with the lowest-addressed parcel holding the lowest-numbered bits in the instruction +++ // specification.
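The length rule quoted above reduces to a two-bit test on the first 16-bit parcel, which the code right below performs. A freestanding restatement (illustration only, not part of the patch):

    #include <cstdint>

    // Byte length of the RISC-V instruction whose first parcel is *parcel:
    // low bits 0b00, 0b01 or 0b10 mark a 16-bit compressed instruction,
    // 0b11 marks a standard 32-bit instruction.
    static unsigned rv_instr_len(const uint16_t* parcel) {
      return ((*parcel & 0b11) == 0b11) ? 4 : 2;
    }

    int main() {
      uint16_t c_nop = 0x0001;    // c.nop: low bits 0b01, compressed
      uint16_t addi_lo = 0x0013;  // low parcel of addi x0,x0,0: low bits 0b11
      return (rv_instr_len(&c_nop) == 2 && rv_instr_len(&addi_lo) == 4) ? 0 : 1;
    }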
+++ if (UseRVC && (((uint16_t *)instr)[0] & 0b11) != 0b11) { +++ // 16-bit instructions have their lowest two bits equal to 0b00, 0b01, or 0b10 +++ return true; +++ } +++ // 32-bit instructions have their lowest two bits set to 0b11 +++ return false; +++ } +++ +++ //---< calculate length of instruction >--- +++ // We just use the values set above. +++ // instruction must start at passed address +++ static unsigned int instr_len(address instr) { +++ return is_compressed_instr(instr) ? compressed_instruction_size : instruction_size; +++ } +++ +++ //---< longest instructions >--- +++ static unsigned int instr_maxlen() { return instruction_size; } +++ +++ enum RoundingMode { +++ rne = 0b000, // round to Nearest, ties to Even +++ rtz = 0b001, // round towards Zero +++ rdn = 0b010, // round Down (towards negative infinity) +++ rup = 0b011, // round Up (towards infinity) +++ rmm = 0b100, // round to Nearest, ties to Max Magnitude +++ rdy = 0b111, // in instruction's rm field, selects dynamic rounding mode. In Rounding Mode register, Invalid. +++ }; +++ +++ static inline uint32_t extract(uint32_t val, unsigned msb, unsigned lsb) { +++ assert_cond(msb >= lsb && msb <= 31); +++ unsigned nbits = msb - lsb + 1; +++ uint32_t mask = (1U << nbits) - 1; +++ uint32_t result = val >> lsb; +++ result &= mask; +++ return result; +++ } +++ +++ static inline int32_t sextract(uint32_t val, unsigned msb, unsigned lsb) { +++ assert_cond(msb >= lsb && msb <= 31); +++ int32_t result = val << (31 - msb); +++ result >>= (31 - msb + lsb); +++ return result; +++ } +++ +++ static void patch(address a, unsigned msb, unsigned lsb, unsigned val) { +++ assert_cond(a != NULL); +++ assert_cond(msb >= lsb && msb <= 31); +++ unsigned nbits = msb - lsb + 1; +++ guarantee(val < (1U << nbits), "Field too big for insn"); +++ unsigned mask = (1U << nbits) - 1; +++ val <<= lsb; +++ mask <<= lsb; +++ unsigned target = *(unsigned *)a; +++ target &= ~mask; +++ target |= val; +++ *(unsigned *)a = target; +++ } +++ +++ static void patch(address a, unsigned bit, unsigned val) { +++ patch(a, bit, bit, val); +++ } +++ +++ static void patch_reg(address a, unsigned lsb, Register reg) { +++ patch(a, lsb + 4, lsb, reg->encoding_nocheck()); +++ } +++ +++ static void patch_reg(address a, unsigned lsb, FloatRegister reg) { +++ patch(a, lsb + 4, lsb, reg->encoding_nocheck()); +++ } +++ +++ static void patch_reg(address a, unsigned lsb, VectorRegister reg) { +++ patch(a, lsb + 4, lsb, reg->encoding_nocheck()); +++ } +++ +++ void emit(unsigned insn) { +++ emit_int32((jint)insn); +++ } +++ +++ enum csr { +++ cycle = 0xc00, +++ time, +++ instret, +++ hpmcounter3, +++ hpmcounter4, +++ hpmcounter5, +++ hpmcounter6, +++ hpmcounter7, +++ hpmcounter8, +++ hpmcounter9, +++ hpmcounter10, +++ hpmcounter11, +++ hpmcounter12, +++ hpmcounter13, +++ hpmcounter14, +++ hpmcounter15, +++ hpmcounter16, +++ hpmcounter17, +++ hpmcounter18, +++ hpmcounter19, +++ hpmcounter20, +++ hpmcounter21, +++ hpmcounter22, +++ hpmcounter23, +++ hpmcounter24, +++ hpmcounter25, +++ hpmcounter26, +++ hpmcounter27, +++ hpmcounter28, +++ hpmcounter29, +++ hpmcounter30, +++ hpmcounter31 = 0xc1f +++ }; +++ +++ // Emit an illegal instruction that's known to trap, with 32 read-only CSR +++ // to choose as the input operand. +++ // According to the RISC-V Assembly Programmer's Manual, a de facto implementation +++ // of this instruction is the UNIMP pseudo-instruction, 'CSRRW x0, cycle, x0', +++ // attempting to write zero to a read-only CSR 'cycle' (0xC00).
+++ // RISC-V ISAs provide a set of up to 32 read-only CSR registers 0xC00-0xC1F, +++ // and an attempt to write into any read-only CSR (whether it exists or not) +++ // will generate an illegal instruction exception. +++ void illegal_instruction(csr csr_reg) { +++ csrrw(x0, (unsigned)csr_reg, x0); +++ } +++ +++// Register Instruction +++#define INSN(NAME, op, funct3, funct7) \ +++ void NAME(Register Rd, Register Rs1, Register Rs2) { \ +++ unsigned insn = 0; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 14, 12, funct3); \ +++ patch((address)&insn, 31, 25, funct7); \ +++ patch_reg((address)&insn, 7, Rd); \ +++ patch_reg((address)&insn, 15, Rs1); \ +++ patch_reg((address)&insn, 20, Rs2); \ +++ emit(insn); \ +++ } +++ +++ INSN(_add, 0b0110011, 0b000, 0b0000000); +++ INSN(_sub, 0b0110011, 0b000, 0b0100000); +++ INSN(_andr, 0b0110011, 0b111, 0b0000000); +++ INSN(_orr, 0b0110011, 0b110, 0b0000000); +++ INSN(_xorr, 0b0110011, 0b100, 0b0000000); +++ INSN(sll, 0b0110011, 0b001, 0b0000000); +++ INSN(sra, 0b0110011, 0b101, 0b0100000); +++ INSN(srl, 0b0110011, 0b101, 0b0000000); +++ INSN(slt, 0b0110011, 0b010, 0b0000000); +++ INSN(sltu, 0b0110011, 0b011, 0b0000000); +++ INSN(_addw, 0b0111011, 0b000, 0b0000000); +++ INSN(_subw, 0b0111011, 0b000, 0b0100000); +++ INSN(sllw, 0b0111011, 0b001, 0b0000000); +++ INSN(sraw, 0b0111011, 0b101, 0b0100000); +++ INSN(srlw, 0b0111011, 0b101, 0b0000000); +++ INSN(mul, 0b0110011, 0b000, 0b0000001); +++ INSN(mulh, 0b0110011, 0b001, 0b0000001); +++ INSN(mulhsu,0b0110011, 0b010, 0b0000001); +++ INSN(mulhu, 0b0110011, 0b011, 0b0000001); +++ INSN(mulw, 0b0111011, 0b000, 0b0000001); +++ INSN(div, 0b0110011, 0b100, 0b0000001); +++ INSN(divu, 0b0110011, 0b101, 0b0000001); +++ INSN(divw, 0b0111011, 0b100, 0b0000001); +++ INSN(divuw, 0b0111011, 0b101, 0b0000001); +++ INSN(rem, 0b0110011, 0b110, 0b0000001); +++ INSN(remu, 0b0110011, 0b111, 0b0000001); +++ INSN(remw, 0b0111011, 0b110, 0b0000001); +++ INSN(remuw, 0b0111011, 0b111, 0b0000001); +++ +++#undef INSN +++ +++// Load/store register (all modes) +++#define INSN(NAME, op, funct3) \ +++ void NAME(Register Rd, Register Rs, const int32_t offset) { \ +++ guarantee(is_simm12(offset), "offset is invalid."); \ +++ unsigned insn = 0; \ +++ int32_t val = offset & 0xfff; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 14, 12, funct3); \ +++ patch_reg((address)&insn, 15, Rs); \ +++ patch_reg((address)&insn, 7, Rd); \ +++ patch((address)&insn, 31, 20, val); \ +++ emit(insn); \ +++ } +++ +++ INSN(lb, 0b0000011, 0b000); +++ INSN(lbu, 0b0000011, 0b100); +++ INSN(lh, 0b0000011, 0b001); +++ INSN(lhu, 0b0000011, 0b101); +++ INSN(_lw, 0b0000011, 0b010); +++ INSN(lwu, 0b0000011, 0b110); +++ INSN(_ld, 0b0000011, 0b011); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3) \ +++ void NAME(FloatRegister Rd, Register Rs, const int32_t offset) { \ +++ guarantee(is_simm12(offset), "offset is invalid."); \ +++ unsigned insn = 0; \ +++ uint32_t val = offset & 0xfff; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 14, 12, funct3); \ +++ patch_reg((address)&insn, 15, Rs); \ +++ patch_reg((address)&insn, 7, Rd); \ +++ patch((address)&insn, 31, 20, val); \ +++ emit(insn); \ +++ } +++ +++ INSN(flw, 0b0000111, 0b010); +++ INSN(_fld, 0b0000111, 0b011); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3) \ +++ void NAME(Register Rs1, Register Rs2, const int64_t offset) { \ +++ guarantee(is_simm13(offset) && ((offset % 2) == 0), "offset is invalid."); \ +++ unsigned insn = 0; \ +++ uint32_t val 
= offset & 0x1fff; \ +++ uint32_t val11 = (val >> 11) & 0x1; \ +++ uint32_t val12 = (val >> 12) & 0x1; \ +++ uint32_t low = (val >> 1) & 0xf; \ +++ uint32_t high = (val >> 5) & 0x3f; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 14, 12, funct3); \ +++ patch_reg((address)&insn, 15, Rs1); \ +++ patch_reg((address)&insn, 20, Rs2); \ +++ patch((address)&insn, 7, val11); \ +++ patch((address)&insn, 11, 8, low); \ +++ patch((address)&insn, 30, 25, high); \ +++ patch((address)&insn, 31, val12); \ +++ emit(insn); \ +++ } +++ +++ INSN(beq, 0b1100011, 0b000); +++ INSN(bne, 0b1100011, 0b001); +++ INSN(bge, 0b1100011, 0b101); +++ INSN(bgeu, 0b1100011, 0b111); +++ INSN(blt, 0b1100011, 0b100); +++ INSN(bltu, 0b1100011, 0b110); +++ +++#undef INSN +++ +++#define INSN(NAME, REGISTER, op, funct3) \ +++ void NAME(REGISTER Rs1, Register Rs2, const int32_t offset) { \ +++ guarantee(is_simm12(offset), "offset is invalid."); \ +++ unsigned insn = 0; \ +++ uint32_t val = offset & 0xfff; \ +++ uint32_t low = val & 0x1f; \ +++ uint32_t high = (val >> 5) & 0x7f; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 14, 12, funct3); \ +++ patch_reg((address)&insn, 15, Rs2); \ +++ patch_reg((address)&insn, 20, Rs1); \ +++ patch((address)&insn, 11, 7, low); \ +++ patch((address)&insn, 31, 25, high); \ +++ emit(insn); \ +++ } \ +++ +++ INSN(sb, Register, 0b0100011, 0b000); +++ INSN(sh, Register, 0b0100011, 0b001); +++ INSN(_sw, Register, 0b0100011, 0b010); +++ INSN(_sd, Register, 0b0100011, 0b011); +++ INSN(fsw, FloatRegister, 0b0100111, 0b010); +++ INSN(_fsd, FloatRegister, 0b0100111, 0b011); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3) \ +++ void NAME(Register Rd, const uint32_t csr, Register Rs1) { \ +++ guarantee(is_uimm12(csr), "csr is invalid"); \ +++ unsigned insn = 0; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 14, 12, funct3); \ +++ patch_reg((address)&insn, 7, Rd); \ +++ patch_reg((address)&insn, 15, Rs1); \ +++ patch((address)&insn, 31, 20, csr); \ +++ emit(insn); \ +++ } +++ +++ INSN(csrrw, 0b1110011, 0b001); +++ INSN(csrrs, 0b1110011, 0b010); +++ INSN(csrrc, 0b1110011, 0b011); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3) \ +++ void NAME(Register Rd, const uint32_t csr, const uint32_t uimm) { \ +++ guarantee(is_uimm12(csr), "csr is invalid"); \ +++ guarantee(is_uimm5(uimm), "uimm is invalid"); \ +++ unsigned insn = 0; \ +++ uint32_t val = uimm & 0x1f; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 14, 12, funct3); \ +++ patch_reg((address)&insn, 7, Rd); \ +++ patch((address)&insn, 19, 15, val); \ +++ patch((address)&insn, 31, 20, csr); \ +++ emit(insn); \ +++ } +++ +++ INSN(csrrwi, 0b1110011, 0b101); +++ INSN(csrrsi, 0b1110011, 0b110); +++ INSN(csrrci, 0b1110011, 0b111); +++ +++#undef INSN +++ +++#define INSN(NAME, op) \ +++ void NAME(Register Rd, const int32_t offset) { \ +++ guarantee(is_simm21(offset) && ((offset % 2) == 0), "offset is invalid."); \ +++ unsigned insn = 0; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch_reg((address)&insn, 7, Rd); \ +++ patch((address)&insn, 19, 12, (uint32_t)((offset >> 12) & 0xff)); \ +++ patch((address)&insn, 20, (uint32_t)((offset >> 11) & 0x1)); \ +++ patch((address)&insn, 30, 21, (uint32_t)((offset >> 1) & 0x3ff)); \ +++ patch((address)&insn, 31, (uint32_t)((offset >> 20) & 0x1)); \ +++ emit(insn); \ +++ } +++ +++ INSN(jal, 0b1101111); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct) \ +++ void NAME(Register Rd, Register Rs, const int32_t offset) { \ +++ 
guarantee(is_simm12(offset), "offset is invalid."); \ +++ unsigned insn = 0; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch_reg((address)&insn, 7, Rd); \ +++ patch((address)&insn, 14, 12, funct); \ +++ patch_reg((address)&insn, 15, Rs); \ +++ int32_t val = offset & 0xfff; \ +++ patch((address)&insn, 31, 20, val); \ +++ emit(insn); \ +++ } +++ +++ INSN(_jalr, 0b1100111, 0b000); +++ +++#undef INSN +++ +++ enum barrier { +++ i = 0b1000, o = 0b0100, r = 0b0010, w = 0b0001, +++ ir = i | r, ow = o | w, iorw = i | o | r | w +++ }; +++ +++ void fence(const uint32_t predecessor, const uint32_t successor) { +++ unsigned insn = 0; +++ guarantee(predecessor < 16, "predecessor is invalid"); +++ guarantee(successor < 16, "successor is invalid"); +++ patch((address)&insn, 6, 0, 0b001111); +++ patch((address)&insn, 11, 7, 0b00000); +++ patch((address)&insn, 14, 12, 0b000); +++ patch((address)&insn, 19, 15, 0b00000); +++ patch((address)&insn, 23, 20, successor); +++ patch((address)&insn, 27, 24, predecessor); +++ patch((address)&insn, 31, 28, 0b0000); +++ emit(insn); +++ } +++ +++#define INSN(NAME, op, funct3, funct7) \ +++ void NAME() { \ +++ unsigned insn = 0; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 11, 7, 0b00000); \ +++ patch((address)&insn, 14, 12, funct3); \ +++ patch((address)&insn, 19, 15, 0b00000); \ +++ patch((address)&insn, 31, 20, funct7); \ +++ emit(insn); \ +++ } +++ +++ INSN(ecall, 0b1110011, 0b000, 0b000000000000); +++ INSN(_ebreak, 0b1110011, 0b000, 0b000000000001); +++ +++#undef INSN +++ +++enum Aqrl {relaxed = 0b00, rl = 0b01, aq = 0b10, aqrl = 0b11}; +++ +++#define INSN(NAME, op, funct3, funct7) \ +++ void NAME(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) { \ +++ unsigned insn = 0; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 14, 12, funct3); \ +++ patch_reg((address)&insn, 7, Rd); \ +++ patch_reg((address)&insn, 15, Rs1); \ +++ patch_reg((address)&insn, 20, Rs2); \ +++ patch((address)&insn, 31, 27, funct7); \ +++ patch((address)&insn, 26, 25, memory_order); \ +++ emit(insn); \ +++ } +++ +++ INSN(amoswap_w, 0b0101111, 0b010, 0b00001); +++ INSN(amoadd_w, 0b0101111, 0b010, 0b00000); +++ INSN(amoxor_w, 0b0101111, 0b010, 0b00100); +++ INSN(amoand_w, 0b0101111, 0b010, 0b01100); +++ INSN(amoor_w, 0b0101111, 0b010, 0b01000); +++ INSN(amomin_w, 0b0101111, 0b010, 0b10000); +++ INSN(amomax_w, 0b0101111, 0b010, 0b10100); +++ INSN(amominu_w, 0b0101111, 0b010, 0b11000); +++ INSN(amomaxu_w, 0b0101111, 0b010, 0b11100); +++ INSN(amoswap_d, 0b0101111, 0b011, 0b00001); +++ INSN(amoadd_d, 0b0101111, 0b011, 0b00000); +++ INSN(amoxor_d, 0b0101111, 0b011, 0b00100); +++ INSN(amoand_d, 0b0101111, 0b011, 0b01100); +++ INSN(amoor_d, 0b0101111, 0b011, 0b01000); +++ INSN(amomin_d, 0b0101111, 0b011, 0b10000); +++ INSN(amomax_d , 0b0101111, 0b011, 0b10100); +++ INSN(amominu_d, 0b0101111, 0b011, 0b11000); +++ INSN(amomaxu_d, 0b0101111, 0b011, 0b11100); +++#undef INSN +++ +++enum operand_size { int8, int16, int32, uint32, int64 }; +++ +++#define INSN(NAME, op, funct3, funct7) \ +++ void NAME(Register Rd, Register Rs1, Aqrl memory_order = relaxed) { \ +++ unsigned insn = 0; \ +++ uint32_t val = memory_order & 0x3; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 14, 12, funct3); \ +++ patch_reg((address)&insn, 7, Rd); \ +++ patch_reg((address)&insn, 15, Rs1); \ +++ patch((address)&insn, 25, 20, 0b00000); \ +++ patch((address)&insn, 31, 27, funct7); \ +++ patch((address)&insn, 26, 25, val); \ +++ emit(insn); \ +++ } +++ +++ 
INSN(lr_w, 0b0101111, 0b010, 0b00010);
+++ INSN(lr_d, 0b0101111, 0b011, 0b00010);
+++
+++ #undef INSN
+++
+++ #define INSN(NAME, op, funct3, funct7) \
+++ void NAME(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = relaxed) { \
+++ unsigned insn = 0; \
+++ uint32_t val = memory_order & 0x3; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, funct3); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs2); \
+++ patch_reg((address)&insn, 20, Rs1); \
+++ patch((address)&insn, 31, 27, funct7); \
+++ patch((address)&insn, 26, 25, val); \
+++ emit(insn); \
+++ }
+++
+++ INSN(sc_w, 0b0101111, 0b010, 0b00011);
+++ INSN(sc_d, 0b0101111, 0b011, 0b00011);
+++ #undef INSN
+++
+++ #define INSN(NAME, op, funct5, funct7) \
+++ void NAME(FloatRegister Rd, FloatRegister Rs1, RoundingMode rm = rne) { \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, rm); \
+++ patch((address)&insn, 24, 20, funct5); \
+++ patch((address)&insn, 31, 25, funct7); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ emit(insn); \
+++ }
+++
+++ INSN(fsqrt_s, 0b1010011, 0b00000, 0b0101100);
+++ INSN(fsqrt_d, 0b1010011, 0b00000, 0b0101101);
+++ INSN(fcvt_s_d, 0b1010011, 0b00001, 0b0100000);
+++ INSN(fcvt_d_s, 0b1010011, 0b00000, 0b0100001);
+++ #undef INSN
+++
+++ // Immediate Instruction
+++ #define INSN(NAME, op, funct3) \
+++ void NAME(Register Rd, Register Rs1, int32_t imm) { \
+++ guarantee(is_simm12(imm), "Immediate is out of range"); \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, funct3); \
+++ patch((address)&insn, 31, 20, imm & 0x00000fff); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ emit(insn); \
+++ }
+++
+++ INSN(_addi, 0b0010011, 0b000);
+++ INSN(slti, 0b0010011, 0b010);
+++ INSN(_addiw, 0b0011011, 0b000);
+++ INSN(_and_imm12, 0b0010011, 0b111);
+++ INSN(ori, 0b0010011, 0b110);
+++ INSN(xori, 0b0010011, 0b100);
+++
+++ #undef INSN
+++
+++ #define INSN(NAME, op, funct3) \
+++ void NAME(Register Rd, Register Rs1, uint32_t imm) { \
+++ guarantee(is_uimm12(imm), "Immediate is out of range"); \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, funct3); \
+++ patch((address)&insn, 31, 20, imm & 0x00000fff); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ emit(insn); \
+++ }
+++
+++ INSN(sltiu, 0b0010011, 0b011);
+++
+++ #undef INSN
+++
+++ // Shift Immediate Instruction
+++ #define INSN(NAME, op, funct3, funct6) \
+++ void NAME(Register Rd, Register Rs1, unsigned shamt) { \
+++ guarantee(shamt <= 0x3f, "Shamt is invalid"); \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, funct3); \
+++ patch((address)&insn, 25, 20, shamt); \
+++ patch((address)&insn, 31, 26, funct6); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ emit(insn); \
+++ }
+++
+++ INSN(_slli, 0b0010011, 0b001, 0b000000);
+++ INSN(_srai, 0b0010011, 0b101, 0b010000);
+++ INSN(_srli, 0b0010011, 0b101, 0b000000);
+++
+++ #undef INSN
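+++
+++ // A minimal usage sketch for the immediate forms above (editor's illustration,
+++ // not part of the upstream patch; it assumes the usual '#define __ _masm->'
+++ // HotSpot shorthand). The 12-bit immediate lands in bits 31..20 and is
+++ // sign-extended by the hardware, so:
+++ //
+++ //   __ _addi(x7, x5, -1);   // -1 is masked to 0xfff, encoding 0xfff28393
+++ //
+++ // An out-of-range value such as 2048 would fail the is_simm12() guarantee above.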
+++
+++ // Shift Word Immediate Instruction
+++ #define INSN(NAME, op, funct3, funct7) \
+++ void NAME(Register Rd, Register Rs1, unsigned shamt) { \
+++ guarantee(shamt <= 0x1f, "Shamt is invalid"); \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, funct3); \
+++ patch((address)&insn, 24, 20, shamt); \
+++ patch((address)&insn, 31, 25, funct7); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ emit(insn); \
+++ }
+++
+++ INSN(slliw, 0b0011011, 0b001, 0b0000000);
+++ INSN(sraiw, 0b0011011, 0b101, 0b0100000);
+++ INSN(srliw, 0b0011011, 0b101, 0b0000000);
+++
+++ #undef INSN
+++
+++ // Upper Immediate Instruction
+++ #define INSN(NAME, op) \
+++ void NAME(Register Rd, int32_t imm) { \
+++ int32_t upperImm = imm >> 12; \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ upperImm &= 0x000fffff; \
+++ patch((address)&insn, 31, 12, upperImm); \
+++ emit(insn); \
+++ }
+++
+++ INSN(_lui, 0b0110111);
+++ INSN(auipc, 0b0010111);
+++
+++ #undef INSN
+++
+++ // Float and Double Register Instruction
+++ #define INSN(NAME, op, funct2) \
+++ void NAME(FloatRegister Rd, FloatRegister Rs1, FloatRegister Rs2, FloatRegister Rs3, RoundingMode rm = rne) { \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, rm); \
+++ patch((address)&insn, 26, 25, funct2); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ patch_reg((address)&insn, 20, Rs2); \
+++ patch_reg((address)&insn, 27, Rs3); \
+++ emit(insn); \
+++ }
+++
+++ INSN(fmadd_s, 0b1000011, 0b00);
+++ INSN(fmsub_s, 0b1000111, 0b00);
+++ INSN(fnmsub_s, 0b1001011, 0b00);
+++ INSN(fnmadd_s, 0b1001111, 0b00);
+++ INSN(fmadd_d, 0b1000011, 0b01);
+++ INSN(fmsub_d, 0b1000111, 0b01);
+++ INSN(fnmsub_d, 0b1001011, 0b01);
+++ INSN(fnmadd_d, 0b1001111, 0b01);
+++
+++ #undef INSN
+++
+++ // Float and Double Register Instruction
+++ #define INSN(NAME, op, funct3, funct7) \
+++ void NAME(FloatRegister Rd, FloatRegister Rs1, FloatRegister Rs2) { \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, funct3); \
+++ patch((address)&insn, 31, 25, funct7); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ patch_reg((address)&insn, 20, Rs2); \
+++ emit(insn); \
+++ }
+++
+++ INSN(fsgnj_s, 0b1010011, 0b000, 0b0010000);
+++ INSN(fsgnjn_s, 0b1010011, 0b001, 0b0010000);
+++ INSN(fsgnjx_s, 0b1010011, 0b010, 0b0010000);
+++ INSN(fmin_s, 0b1010011, 0b000, 0b0010100);
+++ INSN(fmax_s, 0b1010011, 0b001, 0b0010100);
+++ INSN(fsgnj_d, 0b1010011, 0b000, 0b0010001);
+++ INSN(fsgnjn_d, 0b1010011, 0b001, 0b0010001);
+++ INSN(fsgnjx_d, 0b1010011, 0b010, 0b0010001);
+++ INSN(fmin_d, 0b1010011, 0b000, 0b0010101);
+++ INSN(fmax_d, 0b1010011, 0b001, 0b0010101);
+++
+++ #undef INSN
+++
+++ // Float and Double Register Arith Instruction
+++ #define INSN(NAME, op, funct3, funct7) \
+++ void NAME(Register Rd, FloatRegister Rs1, FloatRegister Rs2) { \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, funct3); \
+++ patch((address)&insn, 31, 25, funct7); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ patch_reg((address)&insn, 20, Rs2); \
+++ emit(insn); \
+++ }
+++
+++ INSN(feq_s, 0b1010011, 0b010, 0b1010000);
+++ INSN(flt_s, 0b1010011, 0b001, 0b1010000);
+++ INSN(fle_s, 0b1010011, 0b000, 0b1010000);
+++ INSN(feq_d, 0b1010011, 0b010, 0b1010001);
+++ INSN(fle_d, 0b1010011, 0b000, 0b1010001);
+++ INSN(flt_d, 0b1010011, 0b001, 0b1010001);
+++ #undef INSN
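+++
+++ // A short illustration for the comparisons above (editor's sketch, not part of
+++ // the upstream patch, assuming the usual '#define __ _masm->' shorthand): the
+++ // result is written to an integer register, and any NaN operand yields 0, so
+++ // "not less than" must not be folded into "greater than or equal" by callers:
+++ //
+++ //   __ flt_s(t0, f0, f1);   // t0 = (f0 < f1) ? 1 : 0; t0 = 0 if f0 or f1 is NaN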
+++
+++ // Float and Double Arith Instruction
+++ #define INSN(NAME, op, funct7) \
+++ void NAME(FloatRegister Rd, FloatRegister Rs1, FloatRegister Rs2, RoundingMode rm = rne) { \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, rm); \
+++ patch((address)&insn, 31, 25, funct7); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ patch_reg((address)&insn, 20, Rs2); \
+++ emit(insn); \
+++ }
+++
+++ INSN(fadd_s, 0b1010011, 0b0000000);
+++ INSN(fsub_s, 0b1010011, 0b0000100);
+++ INSN(fmul_s, 0b1010011, 0b0001000);
+++ INSN(fdiv_s, 0b1010011, 0b0001100);
+++ INSN(fadd_d, 0b1010011, 0b0000001);
+++ INSN(fsub_d, 0b1010011, 0b0000101);
+++ INSN(fmul_d, 0b1010011, 0b0001001);
+++ INSN(fdiv_d, 0b1010011, 0b0001101);
+++
+++ #undef INSN
+++
+++ // Integer to Float and Double Conversion Instruction
+++ #define INSN(NAME, op, funct5, funct7) \
+++ void NAME(FloatRegister Rd, Register Rs1, RoundingMode rm = rne) { \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, rm); \
+++ patch((address)&insn, 24, 20, funct5); \
+++ patch((address)&insn, 31, 25, funct7); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ emit(insn); \
+++ }
+++
+++ INSN(fcvt_s_w, 0b1010011, 0b00000, 0b1101000);
+++ INSN(fcvt_s_wu, 0b1010011, 0b00001, 0b1101000);
+++ INSN(fcvt_s_l, 0b1010011, 0b00010, 0b1101000);
+++ INSN(fcvt_s_lu, 0b1010011, 0b00011, 0b1101000);
+++ INSN(fcvt_d_w, 0b1010011, 0b00000, 0b1101001);
+++ INSN(fcvt_d_wu, 0b1010011, 0b00001, 0b1101001);
+++ INSN(fcvt_d_l, 0b1010011, 0b00010, 0b1101001);
+++ INSN(fcvt_d_lu, 0b1010011, 0b00011, 0b1101001);
+++
+++ #undef INSN
+++
+++ // Float and Double to Integer Conversion Instruction
+++ #define INSN(NAME, op, funct5, funct7) \
+++ void NAME(Register Rd, FloatRegister Rs1, RoundingMode rm = rtz) { \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, rm); \
+++ patch((address)&insn, 24, 20, funct5); \
+++ patch((address)&insn, 31, 25, funct7); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ emit(insn); \
+++ }
+++
+++ INSN(fcvt_w_s, 0b1010011, 0b00000, 0b1100000);
+++ INSN(fcvt_l_s, 0b1010011, 0b00010, 0b1100000);
+++ INSN(fcvt_wu_s, 0b1010011, 0b00001, 0b1100000);
+++ INSN(fcvt_lu_s, 0b1010011, 0b00011, 0b1100000);
+++ INSN(fcvt_w_d, 0b1010011, 0b00000, 0b1100001);
+++ INSN(fcvt_wu_d, 0b1010011, 0b00001, 0b1100001);
+++ INSN(fcvt_l_d, 0b1010011, 0b00010, 0b1100001);
+++ INSN(fcvt_lu_d, 0b1010011, 0b00011, 0b1100001);
+++
+++ #undef INSN
+++
+++ // Float and Double Move Instruction
+++ #define INSN(NAME, op, funct3, funct5, funct7) \
+++ void NAME(FloatRegister Rd, Register Rs1) { \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, funct3); \
+++ patch((address)&insn, 24, 20, funct5); \
+++ patch((address)&insn, 31, 25, funct7); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ emit(insn); \
+++ }
+++
+++ INSN(fmv_w_x, 0b1010011, 0b000, 0b00000, 0b1111000);
+++ INSN(fmv_d_x, 0b1010011, 0b000, 0b00000, 0b1111001);
+++
+++ #undef INSN
+++
+++ // Float and Double Classify and Move Instruction
+++ #define INSN(NAME, op, funct3, funct5, funct7) \
+++ void NAME(Register Rd, FloatRegister Rs1) { \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, funct3); \
+++ patch((address)&insn, 24, 20, funct5); \
+++ patch((address)&insn, 31, 25, funct7); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ emit(insn); \
+++ }
+++
+++ INSN(fclass_s, 0b1010011, 0b001, 0b00000, 0b1110000);
+++ INSN(fclass_d, 0b1010011, 0b001, 0b00000, 0b1110001);
+++ INSN(fmv_x_w, 0b1010011, 0b000, 0b00000, 0b1110000);
+++ INSN(fmv_x_d, 0b1010011, 0b000, 0b00000, 0b1110001);
+++
+++ #undef INSN
+++
+++ // ==========================
+++ // RISC-V Vector Extension
+++ // ==========================
+++ enum SEW {
+++ e8,
+++ e16,
+++ e32,
+++ e64,
+++ RESERVED,
+++ };
+++
+++ enum LMUL {
+++ mf8 = 0b101,
+++ mf4 = 0b110,
+++ mf2 = 0b111,
+++ m1 = 0b000,
+++ m2 = 0b001,
+++ m4 = 0b010,
+++ m8 = 0b011,
+++ };
+++
+++ enum VMA {
+++ mu, // undisturbed
+++ ma, // agnostic
+++ };
+++
+++ enum VTA {
+++ tu, // undisturbed
+++ ta, // agnostic
+++ };
+++
+++ static Assembler::SEW elembytes_to_sew(int ebytes) {
+++ assert(ebytes > 0 && ebytes <= 8, "unsupported element size");
+++ return (Assembler::SEW) exact_log2(ebytes);
+++ }
+++
+++ static Assembler::SEW elemtype_to_sew(BasicType etype) {
+++ return Assembler::elembytes_to_sew(type2aelembytes(etype));
+++ }
+++
+++ #define patch_vtype(hsb, lsb, vlmul, vsew, vta, vma, vill) \
+++ if (vill == 1) { \
+++ guarantee(((vlmul | vsew | vta | vma) == 0), \
+++ "the other bits in vtype shall be zero"); \
+++ } \
+++ patch((address)&insn, lsb + 2, lsb, vlmul); \
+++ patch((address)&insn, lsb + 5, lsb + 3, vsew); \
+++ patch((address)&insn, lsb + 6, vta); \
+++ patch((address)&insn, lsb + 7, vma); \
+++ patch((address)&insn, hsb - 1, lsb + 8, 0); \
+++ patch((address)&insn, hsb, vill)
+++
+++ #define INSN(NAME, op, funct3) \
+++ void NAME(Register Rd, Register Rs1, SEW sew, LMUL lmul = m1, \
+++ VMA vma = mu, VTA vta = tu, bool vill = false) { \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, funct3); \
+++ patch_vtype(30, 20, lmul, sew, vta, vma, vill); \
+++ patch((address)&insn, 31, 0); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ emit(insn); \
+++ }
+++
+++ INSN(vsetvli, 0b1010111, 0b111);
+++
+++ #undef INSN
+++
+++ #define INSN(NAME, op, funct3) \
+++ void NAME(Register Rd, uint32_t imm, SEW sew, LMUL lmul = m1, \
+++ VMA vma = mu, VTA vta = tu, bool vill = false) { \
+++ unsigned insn = 0; \
+++ guarantee(is_uimm5(imm), "imm is invalid"); \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, funct3); \
+++ patch((address)&insn, 19, 15, imm); \
+++ patch_vtype(29, 20, lmul, sew, vta, vma, vill); \
+++ patch((address)&insn, 31, 30, 0b11); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ emit(insn); \
+++ }
+++
+++ INSN(vsetivli, 0b1010111, 0b111);
+++
+++ #undef INSN
+++
+++ #undef patch_vtype
+++
+++ #define INSN(NAME, op, funct3, funct7) \
+++ void NAME(Register Rd, Register Rs1, Register Rs2) { \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, funct3); \
+++ patch((address)&insn, 31, 25, funct7); \
+++ patch_reg((address)&insn, 7, Rd); \
+++ patch_reg((address)&insn, 15, Rs1); \
+++ patch_reg((address)&insn, 20, Rs2); \
+++ emit(insn); \
+++ }
+++
+++ // Vector Configuration Instruction
+++ INSN(vsetvl, 0b1010111, 0b111, 0b1000000);
+++
+++ #undef INSN
+++
+++ enum VectorMask {
+++ v0_t = 0b0,
+++ unmasked = 0b1
+++ };
+++
+++ #define patch_VArith(op, Reg, funct3, Reg_or_Imm5, Vs2, vm, funct6) \
+++ unsigned insn = 0; \
+++ patch((address)&insn, 6, 0, op); \
+++ patch((address)&insn, 14, 12, funct3); \
+++ patch((address)&insn, 19, 15, Reg_or_Imm5); \
+++ patch((address)&insn, 25, vm); \
+++ patch((address)&insn, 31, 26, funct6); \
+++ patch_reg((address)&insn, 7, Reg); \
+++ patch_reg((address)&insn, 20, Vs2); \
+++ emit(insn)
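+++
+++ // All vector-arithmetic forms below funnel through patch_VArith and share one
+++ // 32-bit layout: opcode 6..0, vd/rd 11..7, funct3 14..12, vs1/rs1/imm5 19..15,
+++ // vs2 24..20, mask bit vm 25, funct6 31..26. A minimal usage sketch (editor's
+++ // illustration, not from the upstream patch, assuming '#define __ _masm->'):
+++ //
+++ //   __ vsetvli(t0, x10, Assembler::e32, Assembler::m1); // vl = min(x10, VLMAX)
+++ //   __ vadd_vv(v3, v1, v2);                             // v3[i] = v1[i] + v2[i]
+++
+++ // r2_vm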
+++#define INSN(NAME, op, funct3, Vs1, funct6) \ +++ void NAME(Register Rd, VectorRegister Vs2, VectorMask vm = unmasked) { \ +++ patch_VArith(op, Rd, funct3, Vs1, Vs2, vm, funct6); \ +++ } +++ +++ // Vector Mask +++ INSN(vcpop_m, 0b1010111, 0b010, 0b10000, 0b010000); +++ INSN(vfirst_m, 0b1010111, 0b010, 0b10001, 0b010000); +++#undef INSN +++ +++#define INSN(NAME, op, funct3, Vs1, funct6) \ +++ void NAME(VectorRegister Vd, VectorRegister Vs2, VectorMask vm = unmasked) { \ +++ patch_VArith(op, Vd, funct3, Vs1, Vs2, vm, funct6); \ +++ } +++ +++ // Vector Integer Extension +++ INSN(vzext_vf2, 0b1010111, 0b010, 0b00110, 0b010010); +++ INSN(vzext_vf4, 0b1010111, 0b010, 0b00100, 0b010010); +++ INSN(vzext_vf8, 0b1010111, 0b010, 0b00010, 0b010010); +++ INSN(vsext_vf2, 0b1010111, 0b010, 0b00111, 0b010010); +++ INSN(vsext_vf4, 0b1010111, 0b010, 0b00101, 0b010010); +++ INSN(vsext_vf8, 0b1010111, 0b010, 0b00011, 0b010010); +++ +++ // Vector Mask +++ INSN(vmsbf_m, 0b1010111, 0b010, 0b00001, 0b010100); +++ INSN(vmsif_m, 0b1010111, 0b010, 0b00011, 0b010100); +++ INSN(vmsof_m, 0b1010111, 0b010, 0b00010, 0b010100); +++ INSN(viota_m, 0b1010111, 0b010, 0b10000, 0b010100); +++ +++ // Vector Single-Width Floating-Point/Integer Type-Convert Instructions +++ INSN(vfcvt_xu_f_v, 0b1010111, 0b001, 0b00000, 0b010010); +++ INSN(vfcvt_x_f_v, 0b1010111, 0b001, 0b00001, 0b010010); +++ INSN(vfcvt_f_xu_v, 0b1010111, 0b001, 0b00010, 0b010010); +++ INSN(vfcvt_f_x_v, 0b1010111, 0b001, 0b00011, 0b010010); +++ INSN(vfcvt_rtz_xu_f_v, 0b1010111, 0b001, 0b00110, 0b010010); +++ INSN(vfcvt_rtz_x_f_v, 0b1010111, 0b001, 0b00111, 0b010010); +++ +++ // Vector Floating-Point Instruction +++ INSN(vfsqrt_v, 0b1010111, 0b001, 0b00000, 0b010011); +++ INSN(vfclass_v, 0b1010111, 0b001, 0b10000, 0b010011); +++ +++#undef INSN +++ +++// r2rd +++#define INSN(NAME, op, funct3, simm5, vm, funct6) \ +++ void NAME(VectorRegister Vd, VectorRegister Vs2) { \ +++ patch_VArith(op, Vd, funct3, simm5, Vs2, vm, funct6); \ +++ } +++ +++ // Vector Whole Vector Register Move +++ INSN(vmv1r_v, 0b1010111, 0b011, 0b00000, 0b1, 0b100111); +++ INSN(vmv2r_v, 0b1010111, 0b011, 0b00001, 0b1, 0b100111); +++ INSN(vmv4r_v, 0b1010111, 0b011, 0b00011, 0b1, 0b100111); +++ INSN(vmv8r_v, 0b1010111, 0b011, 0b00111, 0b1, 0b100111); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, Vs1, vm, funct6) \ +++ void NAME(FloatRegister Rd, VectorRegister Vs2) { \ +++ patch_VArith(op, Rd, funct3, Vs1, Vs2, vm, funct6); \ +++ } +++ +++ // Vector Floating-Point Move Instruction +++ INSN(vfmv_f_s, 0b1010111, 0b001, 0b00000, 0b1, 0b010000); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, Vs1, vm, funct6) \ +++ void NAME(Register Rd, VectorRegister Vs2) { \ +++ patch_VArith(op, Rd, funct3, Vs1, Vs2, vm, funct6); \ +++ } +++ +++ // Vector Integer Scalar Move Instructions +++ INSN(vmv_x_s, 0b1010111, 0b010, 0b00000, 0b1, 0b010000); +++ +++#undef INSN +++ +++// r_vm +++#define INSN(NAME, op, funct3, funct6) \ +++ void NAME(VectorRegister Vd, VectorRegister Vs2, uint32_t imm, VectorMask vm = unmasked) { \ +++ guarantee(is_uimm5(imm), "imm is invalid"); \ +++ patch_VArith(op, Vd, funct3, (uint32_t)(imm & 0x1f), Vs2, vm, funct6); \ +++ } +++ +++ // Vector Single-Width Bit Shift Instructions +++ INSN(vsra_vi, 0b1010111, 0b011, 0b101001); +++ INSN(vsrl_vi, 0b1010111, 0b011, 0b101000); +++ INSN(vsll_vi, 0b1010111, 0b011, 0b100101); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, funct6) \ +++ void NAME(VectorRegister Vd, VectorRegister Vs1, VectorRegister Vs2, VectorMask vm = 
unmasked) { \ +++ patch_VArith(op, Vd, funct3, Vs1->encoding_nocheck(), Vs2, vm, funct6); \ +++ } +++ +++ // Vector Single-Width Floating-Point Fused Multiply-Add Instructions +++ INSN(vfnmsub_vv, 0b1010111, 0b001, 0b101011); +++ INSN(vfmsub_vv, 0b1010111, 0b001, 0b101010); +++ INSN(vfnmadd_vv, 0b1010111, 0b001, 0b101001); +++ INSN(vfmadd_vv, 0b1010111, 0b001, 0b101000); +++ INSN(vfnmsac_vv, 0b1010111, 0b001, 0b101111); +++ INSN(vfmsac_vv, 0b1010111, 0b001, 0b101110); +++ INSN(vfmacc_vv, 0b1010111, 0b001, 0b101100); +++ INSN(vfnmacc_vv, 0b1010111, 0b001, 0b101101); +++ +++ // Vector Single-Width Integer Multiply-Add Instructions +++ INSN(vnmsub_vv, 0b1010111, 0b010, 0b101011); +++ INSN(vmadd_vv, 0b1010111, 0b010, 0b101001); +++ INSN(vnmsac_vv, 0b1010111, 0b010, 0b101111); +++ INSN(vmacc_vv, 0b1010111, 0b010, 0b101101); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, funct6) \ +++ void NAME(VectorRegister Vd, Register Rs1, VectorRegister Vs2, VectorMask vm = unmasked) { \ +++ patch_VArith(op, Vd, funct3, Rs1->encoding_nocheck(), Vs2, vm, funct6); \ +++ } +++ +++ // Vector Single-Width Integer Multiply-Add Instructions +++ INSN(vnmsub_vx, 0b1010111, 0b110, 0b101011); +++ INSN(vmadd_vx, 0b1010111, 0b110, 0b101001); +++ INSN(vnmsac_vx, 0b1010111, 0b110, 0b101111); +++ INSN(vmacc_vx, 0b1010111, 0b110, 0b101101); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, funct6) \ +++ void NAME(VectorRegister Vd, FloatRegister Rs1, VectorRegister Vs2, VectorMask vm = unmasked) { \ +++ patch_VArith(op, Vd, funct3, Rs1->encoding_nocheck(), Vs2, vm, funct6); \ +++ } +++ +++ // Vector Single-Width Floating-Point Fused Multiply-Add Instructions +++ INSN(vfnmsub_vf, 0b1010111, 0b101, 0b101011); +++ INSN(vfmsub_vf, 0b1010111, 0b101, 0b101010); +++ INSN(vfnmadd_vf, 0b1010111, 0b101, 0b101001); +++ INSN(vfmadd_vf, 0b1010111, 0b101, 0b101000); +++ INSN(vfnmsac_vf, 0b1010111, 0b101, 0b101111); +++ INSN(vfmsac_vf, 0b1010111, 0b101, 0b101110); +++ INSN(vfmacc_vf, 0b1010111, 0b101, 0b101100); +++ INSN(vfnmacc_vf, 0b1010111, 0b101, 0b101101); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, funct6) \ +++ void NAME(VectorRegister Vd, VectorRegister Vs2, VectorRegister Vs1, VectorMask vm = unmasked) { \ +++ patch_VArith(op, Vd, funct3, Vs1->encoding_nocheck(), Vs2, vm, funct6); \ +++ } +++ +++ // Vector Single-Width Floating-Point Reduction Instructions +++ INSN(vfredusum_vs, 0b1010111, 0b001, 0b000001); +++ INSN(vfredosum_vs, 0b1010111, 0b001, 0b000011); +++ INSN(vfredmin_vs, 0b1010111, 0b001, 0b000101); +++ INSN(vfredmax_vs, 0b1010111, 0b001, 0b000111); +++ +++ // Vector Single-Width Integer Reduction Instructions +++ INSN(vredsum_vs, 0b1010111, 0b010, 0b000000); +++ INSN(vredand_vs, 0b1010111, 0b010, 0b000001); +++ INSN(vredor_vs, 0b1010111, 0b010, 0b000010); +++ INSN(vredxor_vs, 0b1010111, 0b010, 0b000011); +++ INSN(vredminu_vs, 0b1010111, 0b010, 0b000100); +++ INSN(vredmin_vs, 0b1010111, 0b010, 0b000101); +++ INSN(vredmaxu_vs, 0b1010111, 0b010, 0b000110); +++ INSN(vredmax_vs, 0b1010111, 0b010, 0b000111); +++ +++ // Vector Floating-Point Compare Instructions +++ INSN(vmfle_vv, 0b1010111, 0b001, 0b011001); +++ INSN(vmflt_vv, 0b1010111, 0b001, 0b011011); +++ INSN(vmfne_vv, 0b1010111, 0b001, 0b011100); +++ INSN(vmfeq_vv, 0b1010111, 0b001, 0b011000); +++ +++ // Vector Floating-Point Sign-Injection Instructions +++ INSN(vfsgnjx_vv, 0b1010111, 0b001, 0b001010); +++ INSN(vfsgnjn_vv, 0b1010111, 0b001, 0b001001); +++ INSN(vfsgnj_vv, 0b1010111, 0b001, 0b001000); +++ +++ // Vector Floating-Point MIN/MAX 
Instructions +++ INSN(vfmax_vv, 0b1010111, 0b001, 0b000110); +++ INSN(vfmin_vv, 0b1010111, 0b001, 0b000100); +++ +++ // Vector Single-Width Floating-Point Multiply/Divide Instructions +++ INSN(vfdiv_vv, 0b1010111, 0b001, 0b100000); +++ INSN(vfmul_vv, 0b1010111, 0b001, 0b100100); +++ +++ // Vector Single-Width Floating-Point Add/Subtract Instructions +++ INSN(vfsub_vv, 0b1010111, 0b001, 0b000010); +++ INSN(vfadd_vv, 0b1010111, 0b001, 0b000000); +++ +++ // Vector Single-Width Fractional Multiply with Rounding and Saturation +++ INSN(vsmul_vv, 0b1010111, 0b000, 0b100111); +++ +++ // Vector Integer Divide Instructions +++ INSN(vrem_vv, 0b1010111, 0b010, 0b100011); +++ INSN(vremu_vv, 0b1010111, 0b010, 0b100010); +++ INSN(vdiv_vv, 0b1010111, 0b010, 0b100001); +++ INSN(vdivu_vv, 0b1010111, 0b010, 0b100000); +++ +++ // Vector Single-Width Integer Multiply Instructions +++ INSN(vmulhsu_vv, 0b1010111, 0b010, 0b100110); +++ INSN(vmulhu_vv, 0b1010111, 0b010, 0b100100); +++ INSN(vmulh_vv, 0b1010111, 0b010, 0b100111); +++ INSN(vmul_vv, 0b1010111, 0b010, 0b100101); +++ +++ // Vector Integer Min/Max Instructions +++ INSN(vmax_vv, 0b1010111, 0b000, 0b000111); +++ INSN(vmaxu_vv, 0b1010111, 0b000, 0b000110); +++ INSN(vmin_vv, 0b1010111, 0b000, 0b000101); +++ INSN(vminu_vv, 0b1010111, 0b000, 0b000100); +++ +++ // Vector Integer Comparison Instructions +++ INSN(vmsle_vv, 0b1010111, 0b000, 0b011101); +++ INSN(vmsleu_vv, 0b1010111, 0b000, 0b011100); +++ INSN(vmslt_vv, 0b1010111, 0b000, 0b011011); +++ INSN(vmsltu_vv, 0b1010111, 0b000, 0b011010); +++ INSN(vmsne_vv, 0b1010111, 0b000, 0b011001); +++ INSN(vmseq_vv, 0b1010111, 0b000, 0b011000); +++ +++ // Vector Single-Width Bit Shift Instructions +++ INSN(vsra_vv, 0b1010111, 0b000, 0b101001); +++ INSN(vsrl_vv, 0b1010111, 0b000, 0b101000); +++ INSN(vsll_vv, 0b1010111, 0b000, 0b100101); +++ +++ // Vector Bitwise Logical Instructions +++ INSN(vxor_vv, 0b1010111, 0b000, 0b001011); +++ INSN(vor_vv, 0b1010111, 0b000, 0b001010); +++ INSN(vand_vv, 0b1010111, 0b000, 0b001001); +++ +++ // Vector Single-Width Integer Add and Subtract +++ INSN(vsub_vv, 0b1010111, 0b000, 0b000010); +++ INSN(vadd_vv, 0b1010111, 0b000, 0b000000); +++ +++#undef INSN +++ +++ +++#define INSN(NAME, op, funct3, funct6) \ +++ void NAME(VectorRegister Vd, VectorRegister Vs2, Register Rs1, VectorMask vm = unmasked) { \ +++ patch_VArith(op, Vd, funct3, Rs1->encoding_nocheck(), Vs2, vm, funct6); \ +++ } +++ +++ // Vector Integer Divide Instructions +++ INSN(vrem_vx, 0b1010111, 0b110, 0b100011); +++ INSN(vremu_vx, 0b1010111, 0b110, 0b100010); +++ INSN(vdiv_vx, 0b1010111, 0b110, 0b100001); +++ INSN(vdivu_vx, 0b1010111, 0b110, 0b100000); +++ +++ // Vector Single-Width Integer Multiply Instructions +++ INSN(vmulhsu_vx, 0b1010111, 0b110, 0b100110); +++ INSN(vmulhu_vx, 0b1010111, 0b110, 0b100100); +++ INSN(vmulh_vx, 0b1010111, 0b110, 0b100111); +++ INSN(vmul_vx, 0b1010111, 0b110, 0b100101); +++ +++ // Vector Integer Min/Max Instructions +++ INSN(vmax_vx, 0b1010111, 0b100, 0b000111); +++ INSN(vmaxu_vx, 0b1010111, 0b100, 0b000110); +++ INSN(vmin_vx, 0b1010111, 0b100, 0b000101); +++ INSN(vminu_vx, 0b1010111, 0b100, 0b000100); +++ +++ // Vector Integer Comparison Instructions +++ INSN(vmsgt_vx, 0b1010111, 0b100, 0b011111); +++ INSN(vmsgtu_vx, 0b1010111, 0b100, 0b011110); +++ INSN(vmsle_vx, 0b1010111, 0b100, 0b011101); +++ INSN(vmsleu_vx, 0b1010111, 0b100, 0b011100); +++ INSN(vmslt_vx, 0b1010111, 0b100, 0b011011); +++ INSN(vmsltu_vx, 0b1010111, 0b100, 0b011010); +++ INSN(vmsne_vx, 0b1010111, 0b100, 0b011001); +++ 
INSN(vmseq_vx, 0b1010111, 0b100, 0b011000); +++ +++ // Vector Narrowing Integer Right Shift Instructions +++ INSN(vnsra_wx, 0b1010111, 0b100, 0b101101); +++ INSN(vnsrl_wx, 0b1010111, 0b100, 0b101100); +++ +++ // Vector Single-Width Bit Shift Instructions +++ INSN(vsra_vx, 0b1010111, 0b100, 0b101001); +++ INSN(vsrl_vx, 0b1010111, 0b100, 0b101000); +++ INSN(vsll_vx, 0b1010111, 0b100, 0b100101); +++ +++ // Vector Bitwise Logical Instructions +++ INSN(vxor_vx, 0b1010111, 0b100, 0b001011); +++ INSN(vor_vx, 0b1010111, 0b100, 0b001010); +++ INSN(vand_vx, 0b1010111, 0b100, 0b001001); +++ +++ // Vector Single-Width Integer Add and Subtract +++ INSN(vsub_vx, 0b1010111, 0b100, 0b000010); +++ INSN(vadd_vx, 0b1010111, 0b100, 0b000000); +++ INSN(vrsub_vx, 0b1010111, 0b100, 0b000011); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, funct6) \ +++ void NAME(VectorRegister Vd, VectorRegister Vs2, FloatRegister Rs1, VectorMask vm = unmasked) { \ +++ patch_VArith(op, Vd, funct3, Rs1->encoding_nocheck(), Vs2, vm, funct6); \ +++ } +++ +++ // Vector Floating-Point Compare Instructions +++ INSN(vmfge_vf, 0b1010111, 0b101, 0b011111); +++ INSN(vmfgt_vf, 0b1010111, 0b101, 0b011101); +++ INSN(vmfle_vf, 0b1010111, 0b101, 0b011001); +++ INSN(vmflt_vf, 0b1010111, 0b101, 0b011011); +++ INSN(vmfne_vf, 0b1010111, 0b101, 0b011100); +++ INSN(vmfeq_vf, 0b1010111, 0b101, 0b011000); +++ +++ // Vector Floating-Point Sign-Injection Instructions +++ INSN(vfsgnjx_vf, 0b1010111, 0b101, 0b001010); +++ INSN(vfsgnjn_vf, 0b1010111, 0b101, 0b001001); +++ INSN(vfsgnj_vf, 0b1010111, 0b101, 0b001000); +++ +++ // Vector Floating-Point MIN/MAX Instructions +++ INSN(vfmax_vf, 0b1010111, 0b101, 0b000110); +++ INSN(vfmin_vf, 0b1010111, 0b101, 0b000100); +++ +++ // Vector Single-Width Floating-Point Multiply/Divide Instructions +++ INSN(vfdiv_vf, 0b1010111, 0b101, 0b100000); +++ INSN(vfmul_vf, 0b1010111, 0b101, 0b100100); +++ INSN(vfrdiv_vf, 0b1010111, 0b101, 0b100001); +++ +++ // Vector Single-Width Floating-Point Add/Subtract Instructions +++ INSN(vfsub_vf, 0b1010111, 0b101, 0b000010); +++ INSN(vfadd_vf, 0b1010111, 0b101, 0b000000); +++ INSN(vfrsub_vf, 0b1010111, 0b101, 0b100111); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, funct6) \ +++ void NAME(VectorRegister Vd, VectorRegister Vs2, int32_t imm, VectorMask vm = unmasked) { \ +++ guarantee(is_simm5(imm), "imm is invalid"); \ +++ patch_VArith(op, Vd, funct3, (uint32_t)(imm & 0x1f), Vs2, vm, funct6); \ +++ } +++ +++ INSN(vmsgt_vi, 0b1010111, 0b011, 0b011111); +++ INSN(vmsgtu_vi, 0b1010111, 0b011, 0b011110); +++ INSN(vmsle_vi, 0b1010111, 0b011, 0b011101); +++ INSN(vmsleu_vi, 0b1010111, 0b011, 0b011100); +++ INSN(vmsne_vi, 0b1010111, 0b011, 0b011001); +++ INSN(vmseq_vi, 0b1010111, 0b011, 0b011000); +++ INSN(vxor_vi, 0b1010111, 0b011, 0b001011); +++ INSN(vor_vi, 0b1010111, 0b011, 0b001010); +++ INSN(vand_vi, 0b1010111, 0b011, 0b001001); +++ INSN(vadd_vi, 0b1010111, 0b011, 0b000000); +++ INSN(vrsub_vi, 0b1010111, 0b011, 0b000011); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, vm, funct6) \ +++ void NAME(VectorRegister Vd, VectorRegister Vs2, VectorRegister Vs1) { \ +++ patch_VArith(op, Vd, funct3, Vs1->encoding_nocheck(), Vs2, vm, funct6); \ +++ } +++ +++ // Vector Compress Instruction +++ INSN(vcompress_vm, 0b1010111, 0b010, 0b1, 0b010111); +++ +++ // Vector Mask-Register Logical Instructions +++ INSN(vmxnor_mm, 0b1010111, 0b010, 0b1, 0b011111); +++ INSN(vmorn_mm, 0b1010111, 0b010, 0b1, 0b011100); +++ INSN(vmnor_mm, 0b1010111, 0b010, 0b1, 0b011110); +++ 
INSN(vmor_mm, 0b1010111, 0b010, 0b1, 0b011010); +++ INSN(vmxor_mm, 0b1010111, 0b010, 0b1, 0b011011); +++ INSN(vmandn_mm, 0b1010111, 0b010, 0b1, 0b011000); +++ INSN(vmnand_mm, 0b1010111, 0b010, 0b1, 0b011101); +++ INSN(vmand_mm, 0b1010111, 0b010, 0b1, 0b011001); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, Vs2, vm, funct6) \ +++ void NAME(VectorRegister Vd, int32_t imm) { \ +++ guarantee(is_simm5(imm), "imm is invalid"); \ +++ patch_VArith(op, Vd, funct3, (uint32_t)(imm & 0x1f), Vs2, vm, funct6); \ +++ } +++ +++ // Vector Integer Move Instructions +++ INSN(vmv_v_i, 0b1010111, 0b011, v0, 0b1, 0b010111); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, Vs2, vm, funct6) \ +++ void NAME(VectorRegister Vd, FloatRegister Rs1) { \ +++ patch_VArith(op, Vd, funct3, Rs1->encoding_nocheck(), Vs2, vm, funct6); \ +++ } +++ +++ // Floating-Point Scalar Move Instructions +++ INSN(vfmv_s_f, 0b1010111, 0b101, v0, 0b1, 0b010000); +++ // Vector Floating-Point Move Instruction +++ INSN(vfmv_v_f, 0b1010111, 0b101, v0, 0b1, 0b010111); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, Vs2, vm, funct6) \ +++ void NAME(VectorRegister Vd, VectorRegister Vs1) { \ +++ patch_VArith(op, Vd, funct3, Vs1->encoding_nocheck(), Vs2, vm, funct6); \ +++ } +++ +++ // Vector Integer Move Instructions +++ INSN(vmv_v_v, 0b1010111, 0b000, v0, 0b1, 0b010111); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, Vs2, vm, funct6) \ +++ void NAME(VectorRegister Vd, Register Rs1) { \ +++ patch_VArith(op, Vd, funct3, Rs1->encoding_nocheck(), Vs2, vm, funct6); \ +++ } +++ +++ // Integer Scalar Move Instructions +++ INSN(vmv_s_x, 0b1010111, 0b110, v0, 0b1, 0b010000); +++ +++ // Vector Integer Move Instructions +++ INSN(vmv_v_x, 0b1010111, 0b100, v0, 0b1, 0b010111); +++ +++#undef INSN +++#undef patch_VArith +++ +++#define INSN(NAME, op, funct13, funct6) \ +++ void NAME(VectorRegister Vd, VectorMask vm = unmasked) { \ +++ unsigned insn = 0; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 24, 12, funct13); \ +++ patch((address)&insn, 25, vm); \ +++ patch((address)&insn, 31, 26, funct6); \ +++ patch_reg((address)&insn, 7, Vd); \ +++ emit(insn); \ +++ } +++ +++ // Vector Element Index Instruction +++ INSN(vid_v, 0b1010111, 0b0000010001010, 0b010100); +++ +++#undef INSN +++ +++enum Nf { +++ g1 = 0b000, +++ g2 = 0b001, +++ g3 = 0b010, +++ g4 = 0b011, +++ g5 = 0b100, +++ g6 = 0b101, +++ g7 = 0b110, +++ g8 = 0b111 +++}; +++ +++#define patch_VLdSt(op, VReg, width, Rs1, Reg_or_umop, vm, mop, mew, nf) \ +++ unsigned insn = 0; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 14, 12, width); \ +++ patch((address)&insn, 24, 20, Reg_or_umop); \ +++ patch((address)&insn, 25, vm); \ +++ patch((address)&insn, 27, 26, mop); \ +++ patch((address)&insn, 28, mew); \ +++ patch((address)&insn, 31, 29, nf); \ +++ patch_reg((address)&insn, 7, VReg); \ +++ patch_reg((address)&insn, 15, Rs1); \ +++ emit(insn) +++ +++#define INSN(NAME, op, lumop, vm, mop, nf) \ +++ void NAME(VectorRegister Vd, Register Rs1, uint32_t width = 0, bool mew = false) { \ +++ guarantee(is_uimm3(width), "width is invalid"); \ +++ patch_VLdSt(op, Vd, width, Rs1, lumop, vm, mop, mew, nf); \ +++ } +++ +++ // Vector Load/Store Instructions +++ INSN(vl1re8_v, 0b0000111, 0b01000, 0b1, 0b00, g1); +++ +++#undef INSN +++ +++#define INSN(NAME, op, width, sumop, vm, mop, mew, nf) \ +++ void NAME(VectorRegister Vs3, Register Rs1) { \ +++ patch_VLdSt(op, Vs3, width, Rs1, sumop, vm, mop, mew, nf); \ +++ } +++ +++ // Vector Load/Store 
Instructions
+++ INSN(vs1r_v, 0b0100111, 0b000, 0b01000, 0b1, 0b00, 0b0, g1);
+++
+++ #undef INSN
+++
+++ // r2_nfvm
+++ #define INSN(NAME, op, width, umop, mop, mew) \
+++ void NAME(VectorRegister Vd_or_Vs3, Register Rs1, Nf nf = g1) { \
+++ patch_VLdSt(op, Vd_or_Vs3, width, Rs1, umop, 1, mop, mew, nf); \
+++ }
+++
+++ // Vector Unit-Stride Instructions
+++ INSN(vlm_v, 0b0000111, 0b000, 0b01011, 0b00, 0b0);
+++ INSN(vsm_v, 0b0100111, 0b000, 0b01011, 0b00, 0b0);
+++
+++ #undef INSN
+++
+++ #define INSN(NAME, op, width, umop, mop, mew) \
+++ void NAME(VectorRegister Vd_or_Vs3, Register Rs1, VectorMask vm = unmasked, Nf nf = g1) { \
+++ patch_VLdSt(op, Vd_or_Vs3, width, Rs1, umop, vm, mop, mew, nf); \
+++ }
+++
+++ // Vector Unit-Stride Instructions
+++ INSN(vle8_v, 0b0000111, 0b000, 0b00000, 0b00, 0b0);
+++ INSN(vle16_v, 0b0000111, 0b101, 0b00000, 0b00, 0b0);
+++ INSN(vle32_v, 0b0000111, 0b110, 0b00000, 0b00, 0b0);
+++ INSN(vle64_v, 0b0000111, 0b111, 0b00000, 0b00, 0b0);
+++
+++ // Vector unit-stride fault-only-first Instructions
+++ INSN(vle8ff_v, 0b0000111, 0b000, 0b10000, 0b00, 0b0);
+++ INSN(vle16ff_v, 0b0000111, 0b101, 0b10000, 0b00, 0b0);
+++ INSN(vle32ff_v, 0b0000111, 0b110, 0b10000, 0b00, 0b0);
+++ INSN(vle64ff_v, 0b0000111, 0b111, 0b10000, 0b00, 0b0);
+++
+++ INSN(vse8_v, 0b0100111, 0b000, 0b00000, 0b00, 0b0);
+++ INSN(vse16_v, 0b0100111, 0b101, 0b00000, 0b00, 0b0);
+++ INSN(vse32_v, 0b0100111, 0b110, 0b00000, 0b00, 0b0);
+++ INSN(vse64_v, 0b0100111, 0b111, 0b00000, 0b00, 0b0);
+++
+++ #undef INSN
+++
+++ #define INSN(NAME, op, width, mop, mew) \
+++ void NAME(VectorRegister Vd, Register Rs1, VectorRegister Vs2, VectorMask vm = unmasked, Nf nf = g1) { \
+++ patch_VLdSt(op, Vd, width, Rs1, Vs2->encoding_nocheck(), vm, mop, mew, nf); \
+++ }
+++
+++ // Vector unordered indexed load instructions
+++ INSN(vluxei8_v, 0b0000111, 0b000, 0b01, 0b0);
+++ INSN(vluxei16_v, 0b0000111, 0b101, 0b01, 0b0);
+++ INSN(vluxei32_v, 0b0000111, 0b110, 0b01, 0b0);
+++ INSN(vluxei64_v, 0b0000111, 0b111, 0b01, 0b0);
+++
+++ // Vector ordered indexed load instructions
+++ INSN(vloxei8_v, 0b0000111, 0b000, 0b11, 0b0);
+++ INSN(vloxei16_v, 0b0000111, 0b101, 0b11, 0b0);
+++ INSN(vloxei32_v, 0b0000111, 0b110, 0b11, 0b0);
+++ INSN(vloxei64_v, 0b0000111, 0b111, 0b11, 0b0);
+++ #undef INSN
+++
+++ #define INSN(NAME, op, width, mop, mew) \
+++ void NAME(VectorRegister Vd, Register Rs1, Register Rs2, VectorMask vm = unmasked, Nf nf = g1) { \
+++ patch_VLdSt(op, Vd, width, Rs1, Rs2->encoding_nocheck(), vm, mop, mew, nf); \
+++ }
+++
+++ // Vector Strided Instructions
+++ INSN(vlse8_v, 0b0000111, 0b000, 0b10, 0b0);
+++ INSN(vlse16_v, 0b0000111, 0b101, 0b10, 0b0);
+++ INSN(vlse32_v, 0b0000111, 0b110, 0b10, 0b0);
+++ INSN(vlse64_v, 0b0000111, 0b111, 0b10, 0b0);
+++
+++ #undef INSN
+++ #undef patch_VLdSt
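+++
+++ // A minimal strip-mined copy step using the unit-stride forms above (editor's
+++ // illustration, not from the upstream patch, assuming '#define __ _masm->';
+++ // x11 = remaining element count, x12 = source, x13 = destination):
+++ //
+++ //   __ vsetvli(t0, x11, Assembler::e32);  // t0 = elements handled this pass
+++ //   __ vle32_v(v1, x12);                  // load  t0 32-bit elements from [x12]
+++ //   __ vse32_v(v1, x13);                  // store t0 32-bit elements to   [x13]
+++
+++ // ====================================
+++ // RISC-V Bit-Manipulation Extension
+++ // Currently only the Zba, Zbb and Zbs bitmanip extensions are supported.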
+++// ==================================== +++#define INSN(NAME, op, funct3, funct7) \ +++ void NAME(Register Rd, Register Rs1, Register Rs2) { \ +++ unsigned insn = 0; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 14, 12, funct3); \ +++ patch((address)&insn, 31, 25, funct7); \ +++ patch_reg((address)&insn, 7, Rd); \ +++ patch_reg((address)&insn, 15, Rs1); \ +++ patch_reg((address)&insn, 20, Rs2); \ +++ emit(insn); \ +++ } +++ +++ INSN(add_uw, 0b0111011, 0b000, 0b0000100); +++ INSN(rol, 0b0110011, 0b001, 0b0110000); +++ INSN(rolw, 0b0111011, 0b001, 0b0110000); +++ INSN(ror, 0b0110011, 0b101, 0b0110000); +++ INSN(rorw, 0b0111011, 0b101, 0b0110000); +++ INSN(sh1add, 0b0110011, 0b010, 0b0010000); +++ INSN(sh2add, 0b0110011, 0b100, 0b0010000); +++ INSN(sh3add, 0b0110011, 0b110, 0b0010000); +++ INSN(sh1add_uw, 0b0111011, 0b010, 0b0010000); +++ INSN(sh2add_uw, 0b0111011, 0b100, 0b0010000); +++ INSN(sh3add_uw, 0b0111011, 0b110, 0b0010000); +++ INSN(andn, 0b0110011, 0b111, 0b0100000); +++ INSN(orn, 0b0110011, 0b110, 0b0100000); +++ INSN(xnor, 0b0110011, 0b100, 0b0100000); +++ INSN(max, 0b0110011, 0b110, 0b0000101); +++ INSN(maxu, 0b0110011, 0b111, 0b0000101); +++ INSN(min, 0b0110011, 0b100, 0b0000101); +++ INSN(minu, 0b0110011, 0b101, 0b0000101); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, funct12) \ +++ void NAME(Register Rd, Register Rs1) { \ +++ unsigned insn = 0; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 14, 12, funct3); \ +++ patch((address)&insn, 31, 20, funct12); \ +++ patch_reg((address)&insn, 7, Rd); \ +++ patch_reg((address)&insn, 15, Rs1); \ +++ emit(insn); \ +++ } +++ +++ INSN(rev8, 0b0010011, 0b101, 0b011010111000); +++ INSN(sext_b, 0b0010011, 0b001, 0b011000000100); +++ INSN(sext_h, 0b0010011, 0b001, 0b011000000101); +++ INSN(zext_h, 0b0111011, 0b100, 0b000010000000); +++ INSN(clz, 0b0010011, 0b001, 0b011000000000); +++ INSN(clzw, 0b0011011, 0b001, 0b011000000000); +++ INSN(ctz, 0b0010011, 0b001, 0b011000000001); +++ INSN(ctzw, 0b0011011, 0b001, 0b011000000001); +++ INSN(cpop, 0b0010011, 0b001, 0b011000000010); +++ INSN(cpopw, 0b0011011, 0b001, 0b011000000010); +++ INSN(orc_b, 0b0010011, 0b101, 0b001010000111); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, funct6) \ +++ void NAME(Register Rd, Register Rs1, unsigned shamt) {\ +++ guarantee(shamt <= 0x3f, "Shamt is invalid"); \ +++ unsigned insn = 0; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 14, 12, funct3); \ +++ patch((address)&insn, 25, 20, shamt); \ +++ patch((address)&insn, 31, 26, funct6); \ +++ patch_reg((address)&insn, 7, Rd); \ +++ patch_reg((address)&insn, 15, Rs1); \ +++ emit(insn); \ +++ } +++ +++ INSN(rori, 0b0010011, 0b101, 0b011000); +++ INSN(slli_uw, 0b0011011, 0b001, 0b000010); +++ INSN(bexti, 0b0010011, 0b101, 0b010010); +++ +++#undef INSN +++ +++#define INSN(NAME, op, funct3, funct7) \ +++ void NAME(Register Rd, Register Rs1, unsigned shamt) {\ +++ guarantee(shamt <= 0x1f, "Shamt is invalid"); \ +++ unsigned insn = 0; \ +++ patch((address)&insn, 6, 0, op); \ +++ patch((address)&insn, 14, 12, funct3); \ +++ patch((address)&insn, 24, 20, shamt); \ +++ patch((address)&insn, 31, 25, funct7); \ +++ patch_reg((address)&insn, 7, Rd); \ +++ patch_reg((address)&insn, 15, Rs1); \ +++ emit(insn); \ +++ } +++ +++ INSN(roriw, 0b0011011, 0b101, 0b0110000); +++ +++#undef INSN +++ +++// ======================================== +++// RISC-V Compressed Instructions Extension +++// ======================================== +++// Note: +++// 1. 
Assembler functions encoding 16-bit compressed instructions always begin with a 'c_'
+++ // prefix, such as 'c_add'. Correspondingly, assembler functions encoding normal 32-bit
+++ // instructions begin with a '_' prefix, such as '_add'. Most of the time users have no
+++ // need to explicitly emit these compressed instructions. Instead, they still use unified
+++ // wrappers such as 'add' which do the compressing work through 'c_add' depending on the
+++ // operands of the instruction and the availability of the RVC hardware extension.
+++ //
+++ // 2. 'CompressibleRegion' and 'IncompressibleRegion' are introduced to mark assembler scopes
+++ // within which instructions are qualified or unqualified to be compressed into their 16-bit
+++ // versions. An example:
+++ //
+++ //   CompressibleRegion cr(_masm);
+++ //   __ add(...);  // this instruction will be compressed into 'c.add' when possible
+++ //   {
+++ //     IncompressibleRegion ir(_masm);
+++ //     __ add(...);  // this instruction will not be compressed
+++ //     {
+++ //       CompressibleRegion cr(_masm);
+++ //       __ add(...);  // this instruction will be compressed into 'c.add' when possible
+++ //     }
+++ //   }
+++ //
+++ // 3. When printing JIT assembly code, using -XX:PrintAssemblyOptions=no-aliases could help
+++ // distinguish compressed 16-bit instructions from normal 32-bit ones.
+++
+++ private:
+++ bool _in_compressible_region;
+++ public:
+++ bool in_compressible_region() const { return _in_compressible_region; }
+++ void set_in_compressible_region(bool b) { _in_compressible_region = b; }
+++ public:
+++
+++ // an abstract compressible region
+++ class AbstractCompressibleRegion : public StackObj {
+++ protected:
+++ Assembler *_masm;
+++ bool _saved_in_compressible_region;
+++ protected:
+++ AbstractCompressibleRegion(Assembler *_masm)
+++ : _masm(_masm)
+++ , _saved_in_compressible_region(_masm->in_compressible_region()) {}
+++ };
+++ // a compressible region
+++ class CompressibleRegion : public AbstractCompressibleRegion {
+++ public:
+++ CompressibleRegion(Assembler *_masm) : AbstractCompressibleRegion(_masm) {
+++ _masm->set_in_compressible_region(true);
+++ }
+++ ~CompressibleRegion() {
+++ _masm->set_in_compressible_region(_saved_in_compressible_region);
+++ }
+++ };
+++ // an incompressible region
+++ class IncompressibleRegion : public AbstractCompressibleRegion {
+++ public:
+++ IncompressibleRegion(Assembler *_masm) : AbstractCompressibleRegion(_masm) {
+++ _masm->set_in_compressible_region(false);
+++ }
+++ ~IncompressibleRegion() {
+++ _masm->set_in_compressible_region(_saved_in_compressible_region);
+++ }
+++ };
+++
+++ public:
+++ // Emit a relocation.
+++ void relocate(RelocationHolder const& rspec, int format = 0) {
+++ AbstractAssembler::relocate(rspec, format);
+++ }
+++ void relocate(relocInfo::relocType rtype, int format = 0) {
+++ AbstractAssembler::relocate(rtype, format);
+++ }
+++ template <typename Callback>
+++ void relocate(RelocationHolder const& rspec, Callback emit_insts, int format = 0) {
+++ AbstractAssembler::relocate(rspec, format);
+++ IncompressibleRegion ir(this); // relocated instructions must stay 32-bit
+++ emit_insts();
+++ }
+++ template <typename Callback>
+++ void relocate(relocInfo::relocType rtype, Callback emit_insts, int format = 0) {
+++ AbstractAssembler::relocate(rtype, format);
+++ IncompressibleRegion ir(this); // relocated instructions must stay 32-bit
+++ emit_insts();
+++ }
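+++
+++ // A minimal usage sketch for the callback form of relocate() (editor's
+++ // illustration, not from the upstream patch, assuming '#define __ _masm->'):
+++ // the IncompressibleRegion keeps the emitted sequence at fixed 32-bit widths,
+++ // so later patching of the relocated instructions cannot change the code size:
+++ //
+++ //   __ relocate(relocInfo::internal_word_type, [&] {
+++ //     __ auipc(t0, 0);    // pc-relative high part, fixed up when patched
+++ //     __ _ld(t0, t0, 0);  // raw, never-compressed 64-bit load
+++ //   });
+++
+++ // patch a 16-bit instruction.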
+++ static void c_patch(address a, unsigned msb, unsigned lsb, uint16_t val) { +++ assert_cond(a != NULL); +++ assert_cond(msb >= lsb && msb <= 15); +++ unsigned nbits = msb - lsb + 1; +++ guarantee(val < (1U << nbits), "Field too big for insn"); +++ uint16_t mask = (1U << nbits) - 1; +++ val <<= lsb; +++ mask <<= lsb; +++ uint16_t target = *(uint16_t *)a; +++ target &= ~mask; +++ target |= val; +++ *(uint16_t *)a = target; +++ } +++ +++ static void c_patch(address a, unsigned bit, uint16_t val) { +++ c_patch(a, bit, bit, val); +++ } +++ +++ // patch a 16-bit instruction with a general purpose register ranging [0, 31] (5 bits) +++ static void c_patch_reg(address a, unsigned lsb, Register reg) { +++ c_patch(a, lsb + 4, lsb, reg->encoding_nocheck()); +++ } +++ +++ // patch a 16-bit instruction with a general purpose register ranging [8, 15] (3 bits) +++ static void c_patch_compressed_reg(address a, unsigned lsb, Register reg) { +++ c_patch(a, lsb + 2, lsb, reg->compressed_encoding_nocheck()); +++ } +++ +++ // patch a 16-bit instruction with a float register ranging [0, 31] (5 bits) +++ static void c_patch_reg(address a, unsigned lsb, FloatRegister reg) { +++ c_patch(a, lsb + 4, lsb, reg->encoding_nocheck()); +++ } +++ +++ // patch a 16-bit instruction with a float register ranging [8, 15] (3 bits) +++ static void c_patch_compressed_reg(address a, unsigned lsb, FloatRegister reg) { +++ c_patch(a, lsb + 2, lsb, reg->compressed_encoding_nocheck()); +++ } +++ +++// -------------- RVC Instruction Definitions -------------- +++ +++ void c_nop() { +++ c_addi(x0, 0); +++ } +++ +++#define INSN(NAME, funct3, op) \ +++ void NAME(Register Rd_Rs1, int32_t imm) { \ +++ assert_cond(is_simm6(imm)); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch((address)&insn, 6, 2, (imm & right_n_bits(5))); \ +++ c_patch_reg((address)&insn, 7, Rd_Rs1); \ +++ c_patch((address)&insn, 12, 12, (imm & nth_bit(5)) >> 5); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_addi, 0b000, 0b01); +++ INSN(c_addiw, 0b001, 0b01); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, op) \ +++ void NAME(int32_t imm) { \ +++ assert_cond(is_simm10(imm)); \ +++ assert_cond((imm & 0b1111) == 0); \ +++ assert_cond(imm != 0); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch((address)&insn, 2, 2, (imm & nth_bit(5)) >> 5); \ +++ c_patch((address)&insn, 4, 3, (imm & right_n_bits(9)) >> 7); \ +++ c_patch((address)&insn, 5, 5, (imm & nth_bit(6)) >> 6); \ +++ c_patch((address)&insn, 6, 6, (imm & nth_bit(4)) >> 4); \ +++ c_patch_reg((address)&insn, 7, sp); \ +++ c_patch((address)&insn, 12, 12, (imm & nth_bit(9)) >> 9); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_addi16sp, 0b011, 0b01); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, op) \ +++ void NAME(Register Rd, uint32_t uimm) { \ +++ assert_cond(is_uimm10(uimm)); \ +++ assert_cond((uimm & 0b11) == 0); \ +++ assert_cond(uimm != 0); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch_compressed_reg((address)&insn, 2, Rd); \ +++ c_patch((address)&insn, 5, 5, (uimm & nth_bit(3)) >> 3); \ +++ c_patch((address)&insn, 6, 6, (uimm & nth_bit(2)) >> 2); \ +++ c_patch((address)&insn, 10, 7, (uimm & right_n_bits(10)) >> 6); \ +++ c_patch((address)&insn, 12, 11, (uimm & right_n_bits(6)) >> 4); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_addi4spn, 0b000, 0b00); +++ +++#undef 
INSN +++ +++#define INSN(NAME, funct3, op) \ +++ void NAME(Register Rd_Rs1, uint32_t shamt) { \ +++ assert_cond(is_uimm6(shamt)); \ +++ assert_cond(shamt != 0); \ +++ assert_cond(Rd_Rs1 != x0); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch((address)&insn, 6, 2, (shamt & right_n_bits(5))); \ +++ c_patch_reg((address)&insn, 7, Rd_Rs1); \ +++ c_patch((address)&insn, 12, 12, (shamt & nth_bit(5)) >> 5); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_slli, 0b000, 0b10); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, funct2, op) \ +++ void NAME(Register Rd_Rs1, uint32_t shamt) { \ +++ assert_cond(is_uimm6(shamt)); \ +++ assert_cond(shamt != 0); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch((address)&insn, 6, 2, (shamt & right_n_bits(5))); \ +++ c_patch_compressed_reg((address)&insn, 7, Rd_Rs1); \ +++ c_patch((address)&insn, 11, 10, funct2); \ +++ c_patch((address)&insn, 12, 12, (shamt & nth_bit(5)) >> 5); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_srli, 0b100, 0b00, 0b01); +++ INSN(c_srai, 0b100, 0b01, 0b01); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, funct2, op) \ +++ void NAME(Register Rd_Rs1, int32_t imm) { \ +++ assert_cond(is_simm6(imm)); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch((address)&insn, 6, 2, (imm & right_n_bits(5))); \ +++ c_patch_compressed_reg((address)&insn, 7, Rd_Rs1); \ +++ c_patch((address)&insn, 11, 10, funct2); \ +++ c_patch((address)&insn, 12, 12, (imm & nth_bit(5)) >> 5); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_andi, 0b100, 0b10, 0b01); +++ +++#undef INSN +++ +++#define INSN(NAME, funct6, funct2, op) \ +++ void NAME(Register Rd_Rs1, Register Rs2) { \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch_compressed_reg((address)&insn, 2, Rs2); \ +++ c_patch((address)&insn, 6, 5, funct2); \ +++ c_patch_compressed_reg((address)&insn, 7, Rd_Rs1); \ +++ c_patch((address)&insn, 15, 10, funct6); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_sub, 0b100011, 0b00, 0b01); +++ INSN(c_xor, 0b100011, 0b01, 0b01); +++ INSN(c_or, 0b100011, 0b10, 0b01); +++ INSN(c_and, 0b100011, 0b11, 0b01); +++ INSN(c_subw, 0b100111, 0b00, 0b01); +++ INSN(c_addw, 0b100111, 0b01, 0b01); +++ +++#undef INSN +++ +++#define INSN(NAME, funct4, op) \ +++ void NAME(Register Rd_Rs1, Register Rs2) { \ +++ assert_cond(Rd_Rs1 != x0); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch_reg((address)&insn, 2, Rs2); \ +++ c_patch_reg((address)&insn, 7, Rd_Rs1); \ +++ c_patch((address)&insn, 15, 12, funct4); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_mv, 0b1000, 0b10); +++ INSN(c_add, 0b1001, 0b10); +++ +++#undef INSN +++ +++#define INSN(NAME, funct4, op) \ +++ void NAME(Register Rs1) { \ +++ assert_cond(Rs1 != x0); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch_reg((address)&insn, 2, x0); \ +++ c_patch_reg((address)&insn, 7, Rs1); \ +++ c_patch((address)&insn, 15, 12, funct4); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_jr, 0b1000, 0b10); +++ INSN(c_jalr, 0b1001, 0b10); +++ +++#undef INSN +++ +++ typedef void (Assembler::* j_c_insn)(address dest); +++ typedef void (Assembler::* compare_and_branch_c_insn)(Register Rs1, address dest); +++ +++ void wrap_label(Label &L, j_c_insn insn) { +++ if (L.is_bound()) { +++ (this->*insn)(target(L)); +++ } else { +++ 
L.add_patch_at(code(), locator()); +++ (this->*insn)(pc()); +++ } +++ } +++ +++ void wrap_label(Label &L, Register r, compare_and_branch_c_insn insn) { +++ if (L.is_bound()) { +++ (this->*insn)(r, target(L)); +++ } else { +++ L.add_patch_at(code(), locator()); +++ (this->*insn)(r, pc()); +++ } +++ } +++ +++#define INSN(NAME, funct3, op) \ +++ void NAME(int32_t offset) { \ +++ assert(is_simm12(offset) && ((offset % 2) == 0), "invalid encoding"); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch((address)&insn, 2, 2, (offset & nth_bit(5)) >> 5); \ +++ c_patch((address)&insn, 5, 3, (offset & right_n_bits(4)) >> 1); \ +++ c_patch((address)&insn, 6, 6, (offset & nth_bit(7)) >> 7); \ +++ c_patch((address)&insn, 7, 7, (offset & nth_bit(6)) >> 6); \ +++ c_patch((address)&insn, 8, 8, (offset & nth_bit(10)) >> 10); \ +++ c_patch((address)&insn, 10, 9, (offset & right_n_bits(10)) >> 8); \ +++ c_patch((address)&insn, 11, 11, (offset & nth_bit(4)) >> 4); \ +++ c_patch((address)&insn, 12, 12, (offset & nth_bit(11)) >> 11); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } \ +++ void NAME(address dest) { \ +++ assert_cond(dest != NULL); \ +++ int64_t distance = dest - pc(); \ +++ assert(is_simm12(distance) && ((distance % 2) == 0), "invalid encoding"); \ +++ c_j(distance); \ +++ } \ +++ void NAME(Label &L) { \ +++ wrap_label(L, &Assembler::NAME); \ +++ } +++ +++ INSN(c_j, 0b101, 0b01); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, op) \ +++ void NAME(Register Rs1, int32_t imm) { \ +++ assert(is_simm9(imm) && ((imm % 2) == 0), "invalid encoding"); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch((address)&insn, 2, 2, (imm & nth_bit(5)) >> 5); \ +++ c_patch((address)&insn, 4, 3, (imm & right_n_bits(3)) >> 1); \ +++ c_patch((address)&insn, 6, 5, (imm & right_n_bits(8)) >> 6); \ +++ c_patch_compressed_reg((address)&insn, 7, Rs1); \ +++ c_patch((address)&insn, 11, 10, (imm & right_n_bits(5)) >> 3); \ +++ c_patch((address)&insn, 12, 12, (imm & nth_bit(8)) >> 8); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } \ +++ void NAME(Register Rs1, address dest) { \ +++ assert_cond(dest != NULL); \ +++ int64_t distance = dest - pc(); \ +++ assert(is_simm9(distance) && ((distance % 2) == 0), "invalid encoding"); \ +++ NAME(Rs1, distance); \ +++ } \ +++ void NAME(Register Rs1, Label &L) { \ +++ wrap_label(L, Rs1, &Assembler::NAME); \ +++ } +++ +++ INSN(c_beqz, 0b110, 0b01); +++ INSN(c_bnez, 0b111, 0b01); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, op) \ +++ void NAME(Register Rd, int32_t imm) { \ +++ assert_cond(is_simm18(imm)); \ +++ assert_cond((imm & 0xfff) == 0); \ +++ assert_cond(imm != 0); \ +++ assert_cond(Rd != x0 && Rd != x2); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch((address)&insn, 6, 2, (imm & right_n_bits(17)) >> 12); \ +++ c_patch_reg((address)&insn, 7, Rd); \ +++ c_patch((address)&insn, 12, 12, (imm & nth_bit(17)) >> 17); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_lui, 0b011, 0b01); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, op) \ +++ void NAME(Register Rd, int32_t imm) { \ +++ assert_cond(is_simm6(imm)); \ +++ assert_cond(Rd != x0); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch((address)&insn, 6, 2, (imm & right_n_bits(5))); \ +++ c_patch_reg((address)&insn, 7, Rd); \ +++ c_patch((address)&insn, 12, 12, (imm & right_n_bits(6)) >> 5); \ +++ 
c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_li, 0b010, 0b01); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, op) \ +++ void NAME(Register Rd, uint32_t uimm) { \ +++ assert_cond(is_uimm9(uimm)); \ +++ assert_cond((uimm & 0b111) == 0); \ +++ assert_cond(Rd != x0); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch((address)&insn, 4, 2, (uimm & right_n_bits(9)) >> 6); \ +++ c_patch((address)&insn, 6, 5, (uimm & right_n_bits(5)) >> 3); \ +++ c_patch_reg((address)&insn, 7, Rd); \ +++ c_patch((address)&insn, 12, 12, (uimm & nth_bit(5)) >> 5); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_ldsp, 0b011, 0b10); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, op) \ +++ void NAME(FloatRegister Rd, uint32_t uimm) { \ +++ assert_cond(is_uimm9(uimm)); \ +++ assert_cond((uimm & 0b111) == 0); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch((address)&insn, 4, 2, (uimm & right_n_bits(9)) >> 6); \ +++ c_patch((address)&insn, 6, 5, (uimm & right_n_bits(5)) >> 3); \ +++ c_patch_reg((address)&insn, 7, Rd); \ +++ c_patch((address)&insn, 12, 12, (uimm & nth_bit(5)) >> 5); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_fldsp, 0b001, 0b10); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, op, REGISTER_TYPE) \ +++ void NAME(REGISTER_TYPE Rd_Rs2, Register Rs1, uint32_t uimm) { \ +++ assert_cond(is_uimm8(uimm)); \ +++ assert_cond((uimm & 0b111) == 0); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch_compressed_reg((address)&insn, 2, Rd_Rs2); \ +++ c_patch((address)&insn, 6, 5, (uimm & right_n_bits(8)) >> 6); \ +++ c_patch_compressed_reg((address)&insn, 7, Rs1); \ +++ c_patch((address)&insn, 12, 10, (uimm & right_n_bits(6)) >> 3); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_ld, 0b011, 0b00, Register); +++ INSN(c_sd, 0b111, 0b00, Register); +++ INSN(c_fld, 0b001, 0b00, FloatRegister); +++ INSN(c_fsd, 0b101, 0b00, FloatRegister); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, op, REGISTER_TYPE) \ +++ void NAME(REGISTER_TYPE Rs2, uint32_t uimm) { \ +++ assert_cond(is_uimm9(uimm)); \ +++ assert_cond((uimm & 0b111) == 0); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch_reg((address)&insn, 2, Rs2); \ +++ c_patch((address)&insn, 9, 7, (uimm & right_n_bits(9)) >> 6); \ +++ c_patch((address)&insn, 12, 10, (uimm & right_n_bits(6)) >> 3); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_sdsp, 0b111, 0b10, Register); +++ INSN(c_fsdsp, 0b101, 0b10, FloatRegister); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, op) \ +++ void NAME(Register Rs2, uint32_t uimm) { \ +++ assert_cond(is_uimm8(uimm)); \ +++ assert_cond((uimm & 0b11) == 0); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch_reg((address)&insn, 2, Rs2); \ +++ c_patch((address)&insn, 8, 7, (uimm & right_n_bits(8)) >> 6); \ +++ c_patch((address)&insn, 12, 9, (uimm & right_n_bits(6)) >> 2); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_swsp, 0b110, 0b10); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, op) \ +++ void NAME(Register Rd, uint32_t uimm) { \ +++ assert_cond(is_uimm8(uimm)); \ +++ assert_cond((uimm & 0b11) == 0); \ +++ assert_cond(Rd != x0); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ 
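+++    /* c.lwsp scatters the offset: uimm[7:6] -> insn[3:2], uimm[4:2] -> insn[6:4], uimm[5] -> insn[12] */ \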
+++ c_patch((address)&insn, 3, 2, (uimm & right_n_bits(8)) >> 6); \ +++ c_patch((address)&insn, 6, 4, (uimm & right_n_bits(5)) >> 2); \ +++ c_patch_reg((address)&insn, 7, Rd); \ +++ c_patch((address)&insn, 12, 12, (uimm & nth_bit(5)) >> 5); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_lwsp, 0b010, 0b10); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, op) \ +++ void NAME(Register Rd_Rs2, Register Rs1, uint32_t uimm) { \ +++ assert_cond(is_uimm7(uimm)); \ +++ assert_cond((uimm & 0b11) == 0); \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch_compressed_reg((address)&insn, 2, Rd_Rs2); \ +++ c_patch((address)&insn, 5, 5, (uimm & nth_bit(6)) >> 6); \ +++ c_patch((address)&insn, 6, 6, (uimm & nth_bit(2)) >> 2); \ +++ c_patch_compressed_reg((address)&insn, 7, Rs1); \ +++ c_patch((address)&insn, 12, 10, (uimm & right_n_bits(6)) >> 3); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_lw, 0b010, 0b00); +++ INSN(c_sw, 0b110, 0b00); +++ +++#undef INSN +++ +++#define INSN(NAME, funct3, op) \ +++ void NAME() { \ +++ uint16_t insn = 0; \ +++ c_patch((address)&insn, 1, 0, op); \ +++ c_patch((address)&insn, 11, 2, 0x0); \ +++ c_patch((address)&insn, 12, 12, 0b1); \ +++ c_patch((address)&insn, 15, 13, funct3); \ +++ emit_int16(insn); \ +++ } +++ +++ INSN(c_ebreak, 0b100, 0b10); +++ +++#undef INSN +++ +++// -------------- RVC Transformation Functions -------------- +++ +++// -------------------------- +++// Register instructions +++// -------------------------- +++#define INSN(NAME) \ +++ void NAME(Register Rd, Register Rs1, Register Rs2) { \ +++ /* add -> c.add */ \ +++ if (do_compress()) { \ +++ Register src = noreg; \ +++ if (Rs1 != x0 && Rs2 != x0 && ((src = Rs1, Rs2 == Rd) || (src = Rs2, Rs1 == Rd))) { \ +++ c_add(Rd, src); \ +++ return; \ +++ } \ +++ } \ +++ _add(Rd, Rs1, Rs2); \ +++ } +++ +++ INSN(add); +++ +++#undef INSN +++ +++// -------------------------- +++#define INSN(NAME, C_NAME, NORMAL_NAME) \ +++ void NAME(Register Rd, Register Rs1, Register Rs2) { \ +++ /* sub/subw -> c.sub/c.subw */ \ +++ if (do_compress() && \ +++ (Rd == Rs1 && Rd->is_compressed_valid() && Rs2->is_compressed_valid())) { \ +++ C_NAME(Rd, Rs2); \ +++ return; \ +++ } \ +++ NORMAL_NAME(Rd, Rs1, Rs2); \ +++ } +++ +++ INSN(sub, c_sub, _sub); +++ INSN(subw, c_subw, _subw); +++ +++#undef INSN +++ +++// -------------------------- +++#define INSN(NAME, C_NAME, NORMAL_NAME) \ +++ void NAME(Register Rd, Register Rs1, Register Rs2) { \ +++ /* and/or/xor/addw -> c.and/c.or/c.xor/c.addw */ \ +++ if (do_compress()) { \ +++ Register src = noreg; \ +++ if (Rs1->is_compressed_valid() && Rs2->is_compressed_valid() && \ +++ ((src = Rs1, Rs2 == Rd) || (src = Rs2, Rs1 == Rd))) { \ +++ C_NAME(Rd, src); \ +++ return; \ +++ } \ +++ } \ +++ NORMAL_NAME(Rd, Rs1, Rs2); \ +++ } +++ +++ INSN(andr, c_and, _andr); +++ INSN(orr, c_or, _orr); +++ INSN(xorr, c_xor, _xorr); +++ INSN(addw, c_addw, _addw); +++ +++#undef INSN +++ +++private: +++// some helper functions +++#define FUNC(NAME, funct3, bits) \ +++ bool NAME(Register rs1, Register rd_rs2, int32_t imm12, bool ld) { \ +++ return rs1 == sp && \ +++ is_uimm(imm12, bits) && \ +++ (intx(imm12) & funct3) == 0x0 && \ +++ (!ld || rd_rs2 != x0); \ +++ } \ +++ +++ FUNC(is_c_ldsdsp, 0b111, 9); +++ FUNC(is_c_lwswsp, 0b011, 8); +++ +++#undef FUNC +++ +++#define FUNC(NAME, funct3, bits) \ +++ bool NAME(Register rs1, int32_t imm12) { \ +++ return rs1 == sp && \ +++ is_uimm(imm12, bits) && 
\ +++ (intx(imm12) & funct3) == 0x0; \ +++ } \ +++ +++ FUNC(is_c_fldsdsp, 0b111, 9); +++ +++#undef FUNC +++ +++#define FUNC(NAME, REG_TYPE, funct3, bits) \ +++ bool NAME(Register rs1, REG_TYPE rd_rs2, int32_t imm12) { \ +++ return rs1->is_compressed_valid() && \ +++ rd_rs2->is_compressed_valid() && \ +++ is_uimm(imm12, bits) && \ +++ (intx(imm12) & funct3) == 0x0; \ +++ } \ +++ +++ FUNC(is_c_ldsd, Register, 0b111, 8); +++ FUNC(is_c_lwsw, Register, 0b011, 7); +++ FUNC(is_c_fldsd, FloatRegister, 0b111, 8); +++ +++#undef FUNC +++ +++public: +++ bool do_compress() const { +++ return UseRVC && in_compressible_region(); +++ } +++ +++// -------------------------- +++// Load/store register +++// -------------------------- +++#define INSN(NAME) \ +++ void NAME(Register Rd, Register Rs, const int32_t offset) { \ +++ /* lw -> c.lwsp/c.lw */ \ +++ if (do_compress()) { \ +++ if (is_c_lwswsp(Rs, Rd, offset, true)) { \ +++ c_lwsp(Rd, offset); \ +++ return; \ +++ } else if (is_c_lwsw(Rs, Rd, offset)) { \ +++ c_lw(Rd, Rs, offset); \ +++ return; \ +++ } \ +++ } \ +++ _lw(Rd, Rs, offset); \ +++ } +++ +++ INSN(lw); +++ +++#undef INSN +++ +++// -------------------------- +++#define INSN(NAME) \ +++ void NAME(Register Rd, Register Rs, const int32_t offset) { \ +++ /* ld -> c.ldsp/c.ld */ \ +++ if (do_compress()) { \ +++ if (is_c_ldsdsp(Rs, Rd, offset, true)) { \ +++ c_ldsp(Rd, offset); \ +++ return; \ +++ } else if (is_c_ldsd(Rs, Rd, offset)) { \ +++ c_ld(Rd, Rs, offset); \ +++ return; \ +++ } \ +++ } \ +++ _ld(Rd, Rs, offset); \ +++ } +++ +++ INSN(ld); +++ +++#undef INSN +++ +++// -------------------------- +++#define INSN(NAME) \ +++ void NAME(FloatRegister Rd, Register Rs, const int32_t offset) { \ +++ /* fld -> c.fldsp/c.fld */ \ +++ if (do_compress()) { \ +++ if (is_c_fldsdsp(Rs, offset)) { \ +++ c_fldsp(Rd, offset); \ +++ return; \ +++ } else if (is_c_fldsd(Rs, Rd, offset)) { \ +++ c_fld(Rd, Rs, offset); \ +++ return; \ +++ } \ +++ } \ +++ _fld(Rd, Rs, offset); \ +++ } +++ +++ INSN(fld); +++ +++#undef INSN +++ +++// -------------------------- +++#define INSN(NAME) \ +++ void NAME(Register Rd, Register Rs, const int32_t offset) { \ +++ /* sd -> c.sdsp/c.sd */ \ +++ if (do_compress()) { \ +++ if (is_c_ldsdsp(Rs, Rd, offset, false)) { \ +++ c_sdsp(Rd, offset); \ +++ return; \ +++ } else if (is_c_ldsd(Rs, Rd, offset)) { \ +++ c_sd(Rd, Rs, offset); \ +++ return; \ +++ } \ +++ } \ +++ _sd(Rd, Rs, offset); \ +++ } +++ +++ INSN(sd); +++ +++#undef INSN +++ +++// -------------------------- +++#define INSN(NAME) \ +++ void NAME(Register Rd, Register Rs, const int32_t offset) { \ +++ /* sw -> c.swsp/c.sw */ \ +++ if (do_compress()) { \ +++ if (is_c_lwswsp(Rs, Rd, offset, false)) { \ +++ c_swsp(Rd, offset); \ +++ return; \ +++ } else if (is_c_lwsw(Rs, Rd, offset)) { \ +++ c_sw(Rd, Rs, offset); \ +++ return; \ +++ } \ +++ } \ +++ _sw(Rd, Rs, offset); \ +++ } +++ +++ INSN(sw); +++ +++#undef INSN +++ +++// -------------------------- +++#define INSN(NAME) \ +++ void NAME(FloatRegister Rd, Register Rs, const int32_t offset) { \ +++ /* fsd -> c.fsdsp/c.fsd */ \ +++ if (do_compress()) { \ +++ if (is_c_fldsdsp(Rs, offset)) { \ +++ c_fsdsp(Rd, offset); \ +++ return; \ +++ } else if (is_c_fldsd(Rs, Rd, offset)) { \ +++ c_fsd(Rd, Rs, offset); \ +++ return; \ +++ } \ +++ } \ +++ _fsd(Rd, Rs, offset); \ +++ } +++ +++ INSN(fsd); +++ +++#undef INSN +++ +++// -------------------------- +++// Unconditional branch instructions +++// -------------------------- +++#define INSN(NAME) \ +++ void NAME(Register Rd, Register Rs, const int32_t 
offset) { \ +++ /* jalr -> c.jr/c.jalr */ \ +++ if (do_compress() && (offset == 0 && Rs != x0)) { \ +++ if (Rd == x1) { \ +++ c_jalr(Rs); \ +++ return; \ +++ } else if (Rd == x0) { \ +++ c_jr(Rs); \ +++ return; \ +++ } \ +++ } \ +++ _jalr(Rd, Rs, offset); \ +++ } +++ +++ INSN(jalr); +++ +++#undef INSN +++ +++// -------------------------- +++// Miscellaneous Instructions +++// -------------------------- +++#define INSN(NAME) \ +++ void NAME() { \ +++ /* ebreak -> c.ebreak */ \ +++ if (do_compress()) { \ +++ c_ebreak(); \ +++ return; \ +++ } \ +++ _ebreak(); \ +++ } +++ +++ INSN(ebreak); +++ +++#undef INSN +++ +++// -------------------------- +++// Immediate Instructions +++// -------------------------- +++#define INSN(NAME) \ +++ void NAME(Register Rd, Register Rs1, int32_t imm) { \ +++ /* addi -> c.addi/c.nop/c.mv/c.addi16sp/c.addi4spn */ \ +++ if (do_compress()) { \ +++ if (Rd == Rs1 && is_simm6(imm)) { \ +++ c_addi(Rd, imm); \ +++ return; \ +++ } else if (imm == 0 && Rd != x0 && Rs1 != x0) { \ +++ c_mv(Rd, Rs1); \ +++ return; \ +++ } else if (Rs1 == sp && imm != 0) { \ +++ if (Rd == Rs1 && (imm & 0b1111) == 0x0 && is_simm10(imm)) { \ +++ c_addi16sp(imm); \ +++ return; \ +++ } else if (Rd->is_compressed_valid() && (imm & 0b11) == 0x0 && is_uimm10(imm)) { \ +++ c_addi4spn(Rd, imm); \ +++ return; \ +++ } \ +++ } \ +++ } \ +++ _addi(Rd, Rs1, imm); \ +++ } +++ +++ INSN(addi); +++ +++#undef INSN +++ +++// -------------------------- +++#define INSN(NAME) \ +++ void NAME(Register Rd, Register Rs1, int32_t imm) { \ +++ /* addiw -> c.addiw */ \ +++ if (do_compress() && (Rd == Rs1 && Rd != x0 && is_simm6(imm))) { \ +++ c_addiw(Rd, imm); \ +++ return; \ +++ } \ +++ _addiw(Rd, Rs1, imm); \ +++ } +++ +++ INSN(addiw); +++ +++#undef INSN +++ +++// -------------------------- +++#define INSN(NAME) \ +++ void NAME(Register Rd, Register Rs1, int32_t imm) { \ +++ /* and_imm12 -> c.andi */ \ +++ if (do_compress() && \ +++ (Rd == Rs1 && Rd->is_compressed_valid() && is_simm6(imm))) { \ +++ c_andi(Rd, imm); \ +++ return; \ +++ } \ +++ _and_imm12(Rd, Rs1, imm); \ +++ } +++ +++ INSN(and_imm12); +++ +++#undef INSN +++ +++// -------------------------- +++// Shift Immediate Instructions +++// -------------------------- +++#define INSN(NAME) \ +++ void NAME(Register Rd, Register Rs1, unsigned shamt) { \ +++ /* slli -> c.slli */ \ +++ if (do_compress() && (Rd == Rs1 && Rd != x0 && shamt != 0)) { \ +++ c_slli(Rd, shamt); \ +++ return; \ +++ } \ +++ _slli(Rd, Rs1, shamt); \ +++ } +++ +++ INSN(slli); +++ +++#undef INSN +++ +++// -------------------------- +++#define INSN(NAME, C_NAME, NORMAL_NAME) \ +++ void NAME(Register Rd, Register Rs1, unsigned shamt) { \ +++ /* srai/srli -> c.srai/c.srli */ \ +++ if (do_compress() && (Rd == Rs1 && Rd->is_compressed_valid() && shamt != 0)) { \ +++ C_NAME(Rd, shamt); \ +++ return; \ +++ } \ +++ NORMAL_NAME(Rd, Rs1, shamt); \ +++ } +++ +++ INSN(srai, c_srai, _srai); +++ INSN(srli, c_srli, _srli); +++ +++#undef INSN +++ +++// -------------------------- +++// Upper Immediate Instruction +++// -------------------------- +++#define INSN(NAME) \ +++ void NAME(Register Rd, int32_t imm) { \ +++ /* lui -> c.lui */ \ +++ if (do_compress() && (Rd != x0 && Rd != x2 && imm != 0 && is_simm18(imm))) { \ +++ c_lui(Rd, imm); \ +++ return; \ +++ } \ +++ _lui(Rd, imm); \ +++ } +++ +++ INSN(lui); +++ +++#undef INSN +++ +++// --------------------------------------------------------------------------------------- +++ +++#define INSN(NAME, REGISTER) \ +++ void NAME(Register Rs) { \ +++ jalr(REGISTER, Rs, 0); 
\ +++ } +++ +++ INSN(jr, x0); +++ INSN(jalr, x1); +++ +++#undef INSN +++ +++ // Stack overflow checking +++ virtual void bang_stack_with_offset(int offset) { Unimplemented(); } +++ +++ static bool is_simm5(int64_t x); +++ static bool is_simm6(int64_t x); +++ static bool is_simm12(int64_t x); +++ static bool is_simm13(int64_t x); +++ static bool is_simm18(int64_t x); +++ static bool is_simm21(int64_t x); +++ +++ static bool is_uimm3(uint64_t x); +++ static bool is_uimm5(uint64_t x); +++ static bool is_uimm6(uint64_t x); +++ static bool is_uimm7(uint64_t x); +++ static bool is_uimm8(uint64_t x); +++ static bool is_uimm9(uint64_t x); +++ static bool is_uimm10(uint64_t x); +++ +++ // The maximum range of a branch is fixed for the RISCV architecture. +++ static const unsigned long branch_range = 1 * M; +++ +++ static bool reachable_from_branch_at(address branch, address target) { +++ return uabs(target - branch) < branch_range; +++ } +++ +++ Assembler(CodeBuffer* code) : AbstractAssembler(code), _in_compressible_region(false) {} +++ +++ virtual ~Assembler() {} +++}; +++ +++class BiasedLockingCounters; +++ +++#endif // CPU_RISCV_ASSEMBLER_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/assembler_riscv.inline.hpp ++@@ -0,0 +1,49 @@ +++/* +++ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#ifndef CPU_RISCV_ASSEMBLER_RISCV_INLINE_HPP +++#define CPU_RISCV_ASSEMBLER_RISCV_INLINE_HPP +++ +++#include "asm/assembler.inline.hpp" +++#include "asm/codeBuffer.hpp" +++#include "code/codeCache.hpp" +++ +++inline bool Assembler::is_simm5(int64_t x) { return is_simm(x, 5); } +++inline bool Assembler::is_simm6(int64_t x) { return is_simm(x, 6); } +++inline bool Assembler::is_simm12(int64_t x) { return is_simm(x, 12); } +++inline bool Assembler::is_simm13(int64_t x) { return is_simm(x, 13); } +++inline bool Assembler::is_simm18(int64_t x) { return is_simm(x, 18); } +++inline bool Assembler::is_simm21(int64_t x) { return is_simm(x, 21); } +++ +++inline bool Assembler::is_uimm3(uint64_t x) { return is_uimm(x, 3); } +++inline bool Assembler::is_uimm5(uint64_t x) { return is_uimm(x, 5); } +++inline bool Assembler::is_uimm6(uint64_t x) { return is_uimm(x, 6); } +++inline bool Assembler::is_uimm7(uint64_t x) { return is_uimm(x, 7); } +++inline bool Assembler::is_uimm8(uint64_t x) { return is_uimm(x, 8); } +++inline bool Assembler::is_uimm9(uint64_t x) { return is_uimm(x, 9); } +++inline bool Assembler::is_uimm10(uint64_t x) { return is_uimm(x, 10); } +++ +++#endif // CPU_RISCV_ASSEMBLER_RISCV_INLINE_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/bytes_riscv.hpp ++@@ -0,0 +1,169 @@ +++/* +++ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2012, 2016 SAP SE. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_BYTES_RISCV_HPP +++#define CPU_RISCV_BYTES_RISCV_HPP +++ +++#include "memory/allStatic.hpp" +++ +++class Bytes: AllStatic { +++ public: +++ // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering +++ // RISCV needs to check for alignment. 
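+++  // The accessors below dispatch on the low address bits: an aligned
+++  // pointer uses a single load/store, a halfword/word-aligned pointer is
+++  // assembled from u2/u4 pieces, and an unaligned pointer falls back to
+++  // byte-at-a-time access.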
+++ +++ // Forward declarations of the compiler-dependent implementation +++ static inline u2 swap_u2(u2 x); +++ static inline u4 swap_u4(u4 x); +++ static inline u8 swap_u8(u8 x); +++ +++ static inline u2 get_native_u2(address p) { +++ if ((intptr_t(p) & 1) == 0) { +++ return *(u2*)p; +++ } else { +++ return ((u2)(p[1]) << 8) | +++ ((u2)(p[0])); +++ } +++ } +++ +++ static inline u4 get_native_u4(address p) { +++ switch (intptr_t(p) & 3) { +++ case 0: +++ return *(u4*)p; +++ +++ case 2: +++ return ((u4)(((u2*)p)[1]) << 16) | +++ ((u4)(((u2*)p)[0])); +++ +++ default: +++ return ((u4)(p[3]) << 24) | +++ ((u4)(p[2]) << 16) | +++ ((u4)(p[1]) << 8) | +++ ((u4)(p[0])); +++ } +++ } +++ +++ static inline u8 get_native_u8(address p) { +++ switch (intptr_t(p) & 7) { +++ case 0: +++ return *(u8*)p; +++ +++ case 4: +++ return ((u8)(((u4*)p)[1]) << 32) | +++ ((u8)(((u4*)p)[0])); +++ +++ case 2: +++ case 6: +++ return ((u8)(((u2*)p)[3]) << 48) | +++ ((u8)(((u2*)p)[2]) << 32) | +++ ((u8)(((u2*)p)[1]) << 16) | +++ ((u8)(((u2*)p)[0])); +++ +++ default: +++ return ((u8)(p[7]) << 56) | +++ ((u8)(p[6]) << 48) | +++ ((u8)(p[5]) << 40) | +++ ((u8)(p[4]) << 32) | +++ ((u8)(p[3]) << 24) | +++ ((u8)(p[2]) << 16) | +++ ((u8)(p[1]) << 8) | +++ ((u8)(p[0])); +++ } +++ } +++ +++ static inline void put_native_u2(address p, u2 x) { +++ if ((intptr_t(p) & 1) == 0) { +++ *(u2*)p = x; +++ } else { +++ p[1] = x >> 8; +++ p[0] = x; +++ } +++ } +++ +++ static inline void put_native_u4(address p, u4 x) { +++ switch (intptr_t(p) & 3) { +++ case 0: +++ *(u4*)p = x; +++ break; +++ +++ case 2: +++ ((u2*)p)[1] = x >> 16; +++ ((u2*)p)[0] = x; +++ break; +++ +++ default: +++ ((u1*)p)[3] = x >> 24; +++ ((u1*)p)[2] = x >> 16; +++ ((u1*)p)[1] = x >> 8; +++ ((u1*)p)[0] = x; +++ break; +++ } +++ } +++ +++ static inline void put_native_u8(address p, u8 x) { +++ switch (intptr_t(p) & 7) { +++ case 0: +++ *(u8*)p = x; +++ break; +++ +++ case 4: +++ ((u4*)p)[1] = x >> 32; +++ ((u4*)p)[0] = x; +++ break; +++ +++ case 2: +++ case 6: +++ ((u2*)p)[3] = x >> 48; +++ ((u2*)p)[2] = x >> 32; +++ ((u2*)p)[1] = x >> 16; +++ ((u2*)p)[0] = x; +++ break; +++ +++ default: +++ ((u1*)p)[7] = x >> 56; +++ ((u1*)p)[6] = x >> 48; +++ ((u1*)p)[5] = x >> 40; +++ ((u1*)p)[4] = x >> 32; +++ ((u1*)p)[3] = x >> 24; +++ ((u1*)p)[2] = x >> 16; +++ ((u1*)p)[1] = x >> 8; +++ ((u1*)p)[0] = x; +++ break; +++ } +++ } +++ +++ // Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering) +++ static inline u2 get_Java_u2(address p) { return swap_u2(get_native_u2(p)); } +++ static inline u4 get_Java_u4(address p) { return swap_u4(get_native_u4(p)); } +++ static inline u8 get_Java_u8(address p) { return swap_u8(get_native_u8(p)); } +++ +++ static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, swap_u2(x)); } +++ static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, swap_u4(x)); } +++ static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, swap_u8(x)); } +++}; +++ +++#include OS_CPU_HEADER(bytes) +++ +++#endif // CPU_RISCV_BYTES_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp ++@@ -0,0 +1,357 @@ +++/* +++ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.inline.hpp" +++#include "c1/c1_CodeStubs.hpp" +++#include "c1/c1_FrameMap.hpp" +++#include "c1/c1_LIRAssembler.hpp" +++#include "c1/c1_MacroAssembler.hpp" +++#include "c1/c1_Runtime1.hpp" +++#include "classfile/javaClasses.hpp" +++#include "nativeInst_riscv.hpp" +++#include "runtime/sharedRuntime.hpp" +++#include "vmreg_riscv.inline.hpp" +++ +++ +++#define __ ce->masm()-> +++ +++void C1SafepointPollStub::emit_code(LIR_Assembler* ce) { +++ __ bind(_entry); +++ InternalAddress safepoint_pc(__ pc() - __ offset() + safepoint_offset()); +++ __ relocate(safepoint_pc.rspec(), [&] { +++ __ la(t0, safepoint_pc.target()); +++ }); +++ __ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset())); +++ +++ assert(SharedRuntime::polling_page_return_handler_blob() != NULL, +++ "polling page return stub not created yet"); +++ address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point(); +++ +++ __ far_jump(RuntimeAddress(stub)); +++} +++ +++void CounterOverflowStub::emit_code(LIR_Assembler* ce) { +++ __ bind(_entry); +++ Metadata *m = _method->as_constant_ptr()->as_metadata(); +++ __ mov_metadata(t0, m); +++ ce->store_parameter(t0, 1); +++ ce->store_parameter(_bci, 0); +++ __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id))); +++ ce->add_call_info_here(_info); +++ ce->verify_oop_map(_info); +++ __ j(_continuation); +++} +++ +++RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array) +++ : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) { +++ assert(info != NULL, "must have info"); +++ _info = new CodeEmitInfo(info); +++} +++ +++RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index) +++ : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) { +++ assert(info != NULL, "must have info"); +++ _info = new CodeEmitInfo(info); +++} +++ +++void RangeCheckStub::emit_code(LIR_Assembler* ce) { +++ __ bind(_entry); +++ if (_info->deoptimize_on_exception()) { +++ address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id); +++ __ far_call(RuntimeAddress(a)); +++ ce->add_call_info_here(_info); +++ ce->verify_oop_map(_info); +++ debug_only(__ should_not_reach_here()); +++ return; +++ } +++ +++ if (_index->is_cpu_register()) { +++ __ mv(t0, _index->as_register()); +++ } else { +++ __ mv(t0, _index->as_jint()); +++ } +++ Runtime1::StubID stub_id; +++ if (_throw_index_out_of_bounds_exception) { +++ stub_id = Runtime1::throw_index_exception_id; +++ } else { +++ 
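+++    // Out-of-bounds on a known array: pass the array oop in t1 (the
+++    // index is already in t0) so the runtime can throw
+++    // ArrayIndexOutOfBoundsException with the failing index.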
assert(_array != NULL, "sanity");
+++    __ mv(t1, _array->as_pointer_register());
+++    stub_id = Runtime1::throw_range_check_failed_id;
+++  }
+++  RuntimeAddress target(Runtime1::entry_for(stub_id));
+++  __ relocate(target.rspec(), [&] {
+++    int32_t offset;
+++    __ la_patchable(ra, target, offset);
+++    __ jalr(ra, ra, offset);
+++  });
+++  ce->add_call_info_here(_info);
+++  ce->verify_oop_map(_info);
+++  debug_only(__ should_not_reach_here());
+++}
+++
+++PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
+++  _info = new CodeEmitInfo(info);
+++}
+++
+++void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
+++  __ bind(_entry);
+++  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+++  __ far_call(RuntimeAddress(a));
+++  ce->add_call_info_here(_info);
+++  ce->verify_oop_map(_info);
+++  debug_only(__ should_not_reach_here());
+++}
+++
+++void DivByZeroStub::emit_code(LIR_Assembler* ce) {
+++  if (_offset != -1) {
+++    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
+++  }
+++  __ bind(_entry);
+++  __ far_call(Address(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type));
+++  ce->add_call_info_here(_info);
+++  ce->verify_oop_map(_info);
+++#ifdef ASSERT
+++  __ should_not_reach_here();
+++#endif
+++}
+++
+++// Implementation of NewInstanceStub
+++NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
+++  _result = result;
+++  _klass = klass;
+++  _klass_reg = klass_reg;
+++  _info = new CodeEmitInfo(info);
+++  assert(stub_id == Runtime1::new_instance_id ||
+++         stub_id == Runtime1::fast_new_instance_id ||
+++         stub_id == Runtime1::fast_new_instance_init_check_id,
+++         "need new_instance id");
+++  _stub_id = stub_id;
+++}
+++
+++void NewInstanceStub::emit_code(LIR_Assembler* ce) {
+++  assert(__ rsp_offset() == 0, "frame size should be fixed");
+++  __ bind(_entry);
+++  __ mv(x13, _klass_reg->as_register());
+++  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
+++  ce->add_call_info_here(_info);
+++  ce->verify_oop_map(_info);
+++  assert(_result->as_register() == x10, "result must be in x10");
+++  __ j(_continuation);
+++}
+++
+++// Implementation of NewTypeArrayStub
+++NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
+++  _klass_reg = klass_reg;
+++  _length = length;
+++  _result = result;
+++  _info = new CodeEmitInfo(info);
+++}
+++
+++void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
+++  assert(__ rsp_offset() == 0, "frame size should be fixed");
+++  __ bind(_entry);
+++  assert(_length->as_register() == x9, "length must be in x9");
+++  assert(_klass_reg->as_register() == x13, "klass_reg must be in x13");
+++  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
+++  ce->add_call_info_here(_info);
+++  ce->verify_oop_map(_info);
+++  assert(_result->as_register() == x10, "result must be in x10");
+++  __ j(_continuation);
+++}
+++
+++// Implementation of NewObjectArrayStub
+++NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
+++  _klass_reg = klass_reg;
+++  _result = result;
+++  _length = length;
+++  _info = new CodeEmitInfo(info);
+++}
+++
+++void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
+++  assert(__ rsp_offset() == 0, "frame size should be fixed");
+++  __ bind(_entry);
+++  assert(_length->as_register() == x9, "length must be in x9");
+++  assert(_klass_reg->as_register() == x13,
"klass_reg must in x13"); +++ __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id))); +++ ce->add_call_info_here(_info); +++ ce->verify_oop_map(_info); +++ assert(_result->as_register() == x10, "result must in x10"); +++ __ j(_continuation); +++} +++ +++// Implementation of MonitorAccessStubs +++MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info) +++: MonitorAccessStub(obj_reg, lock_reg) { +++ _info = new CodeEmitInfo(info); +++} +++ +++void MonitorEnterStub::emit_code(LIR_Assembler* ce) { +++ assert(__ rsp_offset() == 0, "frame size should be fixed"); +++ __ bind(_entry); +++ ce->store_parameter(_obj_reg->as_register(), 1); +++ ce->store_parameter(_lock_reg->as_register(), 0); +++ Runtime1::StubID enter_id; +++ if (ce->compilation()->has_fpu_code()) { +++ enter_id = Runtime1::monitorenter_id; +++ } else { +++ enter_id = Runtime1::monitorenter_nofpu_id; +++ } +++ __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id))); +++ ce->add_call_info_here(_info); +++ ce->verify_oop_map(_info); +++ __ j(_continuation); +++} +++ +++void MonitorExitStub::emit_code(LIR_Assembler* ce) { +++ __ bind(_entry); +++ if (_compute_lock) { +++ // lock_reg was destroyed by fast unlocking attempt => recompute it +++ ce->monitor_address(_monitor_ix, _lock_reg); +++ } +++ ce->store_parameter(_lock_reg->as_register(), 0); +++ // note: non-blocking leaf routine => no call info needed +++ Runtime1::StubID exit_id; +++ if (ce->compilation()->has_fpu_code()) { +++ exit_id = Runtime1::monitorexit_id; +++ } else { +++ exit_id = Runtime1::monitorexit_nofpu_id; +++ } +++ __ la(ra, _continuation); +++ __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id))); +++} +++ +++// Implementation of patching: +++// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes) +++// - Replace original code with a call to the stub +++// At Runtime: +++// - call to stub, jump to runtime +++// - in runtime: preserve all registers (rspecially objects, i.e., source and destination object) +++// - in runtime: after initializing class, restore original code, reexecute instruction +++ +++int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size; +++ +++void PatchingStub::align_patch_site(MacroAssembler* masm) {} +++ +++void PatchingStub::emit_code(LIR_Assembler* ce) { +++ assert(false, "RISCV should not use C1 runtime patching"); +++} +++ +++void DeoptimizeStub::emit_code(LIR_Assembler* ce) { +++ __ bind(_entry); +++ ce->store_parameter(_trap_request, 0); +++ __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id))); +++ ce->add_call_info_here(_info); +++ DEBUG_ONLY(__ should_not_reach_here()); +++} +++ +++void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { +++ address a = NULL; +++ if (_info->deoptimize_on_exception()) { +++ // Deoptimize, do not throw the exception, because it is probably wrong to do it here. 
+++ a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id); +++ } else { +++ a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id); +++ } +++ +++ ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); +++ __ bind(_entry); +++ __ far_call(RuntimeAddress(a)); +++ ce->add_call_info_here(_info); +++ ce->verify_oop_map(_info); +++ debug_only(__ should_not_reach_here()); +++} +++ +++void SimpleExceptionStub::emit_code(LIR_Assembler* ce) { +++ assert(__ rsp_offset() == 0, "frame size should be fixed"); +++ +++ __ bind(_entry); +++ // pass the object in a tmp register because all other registers +++ // must be preserved +++ if (_obj->is_cpu_register()) { +++ __ mv(t0, _obj->as_register()); +++ } +++ __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), NULL, t1); +++ ce->add_call_info_here(_info); +++ debug_only(__ should_not_reach_here()); +++} +++ +++void ArrayCopyStub::emit_code(LIR_Assembler* ce) { +++ // ---------------slow case: call to native----------------- +++ __ bind(_entry); +++ // Figure out where the args should go +++ // This should really convert the IntrinsicID to the Method* and signature +++ // but I don't know how to do that. +++ const int args_num = 5; +++ VMRegPair args[args_num]; +++ BasicType signature[args_num] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT }; +++ SharedRuntime::java_calling_convention(signature, args, args_num); +++ +++ // push parameters +++ Register r[args_num]; +++ r[0] = src()->as_register(); +++ r[1] = src_pos()->as_register(); +++ r[2] = dst()->as_register(); +++ r[3] = dst_pos()->as_register(); +++ r[4] = length()->as_register(); +++ +++ // next registers will get stored on the stack +++ for (int j = 0; j < args_num; j++) { +++ VMReg r_1 = args[j].first(); +++ if (r_1->is_stack()) { +++ int st_off = r_1->reg2stack() * wordSize; +++ __ sd(r[j], Address(sp, st_off)); +++ } else { +++ assert(r[j] == args[j].first()->as_Register(), "Wrong register for arg"); +++ } +++ } +++ +++ ce->align_call(lir_static_call); +++ +++ ce->emit_static_call_stub(); +++ if (ce->compilation()->bailed_out()) { +++ return; // CodeCache is full +++ } +++ Address resolve(SharedRuntime::get_resolve_static_call_stub(), +++ relocInfo::static_call_type); +++ address call = __ trampoline_call(resolve); +++ if (call == NULL) { +++ ce->bailout("trampoline stub overflow"); +++ return; +++ } +++ ce->add_call_info_here(info()); +++ +++#ifndef PRODUCT +++ if (PrintC1Statistics) { +++ __ la(t1, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt)); +++ __ incrementw(Address(t1)); +++ } +++#endif +++ +++ __ j(_continuation); +++} +++ +++#undef __ ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_Defs_riscv.hpp ++@@ -0,0 +1,84 @@ +++/* +++ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). 
+++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_C1_DEFS_RISCV_HPP +++#define CPU_RISCV_C1_DEFS_RISCV_HPP +++ +++// native word offsets from memory address (little endian) +++enum { +++ pd_lo_word_offset_in_bytes = 0, +++ pd_hi_word_offset_in_bytes = BytesPerWord +++}; +++ +++// explicit rounding operations are required to implement the strictFP mode +++enum { +++ pd_strict_fp_requires_explicit_rounding = false +++}; +++ +++// registers +++enum { +++ pd_nof_cpu_regs_frame_map = RegisterImpl::number_of_registers, // number of registers used during code emission +++ pd_nof_fpu_regs_frame_map = FloatRegisterImpl::number_of_registers, // number of float registers used during code emission +++ +++ // caller saved +++ pd_nof_caller_save_cpu_regs_frame_map = 13, // number of registers killed by calls +++ pd_nof_caller_save_fpu_regs_frame_map = 32, // number of float registers killed by calls +++ +++ pd_first_callee_saved_reg = pd_nof_caller_save_cpu_regs_frame_map, +++ pd_last_callee_saved_reg = 21, +++ +++ pd_last_allocatable_cpu_reg = pd_nof_caller_save_cpu_regs_frame_map - 1, +++ +++ pd_nof_cpu_regs_reg_alloc +++ = pd_nof_caller_save_cpu_regs_frame_map, // number of registers that are visible to register allocator +++ pd_nof_fpu_regs_reg_alloc = 32, // number of float registers that are visible to register allocator +++ +++ pd_nof_cpu_regs_linearscan = 32, // number of registers visible to linear scan +++ pd_nof_fpu_regs_linearscan = pd_nof_fpu_regs_frame_map, // number of float registers visible to linear scan +++ pd_nof_xmm_regs_linearscan = 0, // don't have vector registers +++ +++ pd_first_cpu_reg = 0, +++ pd_last_cpu_reg = pd_nof_cpu_regs_reg_alloc - 1, +++ pd_first_byte_reg = 0, +++ pd_last_byte_reg = pd_nof_cpu_regs_reg_alloc - 1, +++ +++ pd_first_fpu_reg = pd_nof_cpu_regs_frame_map, +++ pd_last_fpu_reg = pd_first_fpu_reg + 31, +++ +++ pd_first_callee_saved_fpu_reg_1 = 8 + pd_first_fpu_reg, +++ pd_last_callee_saved_fpu_reg_1 = 9 + pd_first_fpu_reg, +++ pd_first_callee_saved_fpu_reg_2 = 18 + pd_first_fpu_reg, +++ pd_last_callee_saved_fpu_reg_2 = 27 + pd_first_fpu_reg +++}; +++ +++ +++// Encoding of float value in debug info. This is true on x86 where +++// floats are extended to doubles when stored in the stack, false for +++// RISCV where floats and doubles are stored in their native form. +++enum { +++ pd_float_saved_as_double = false +++}; +++ +++#endif // CPU_RISCV_C1_DEFS_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_FpuStackSim_riscv.cpp ++@@ -0,0 +1,30 @@ +++/* +++ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. 
+++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++//-------------------------------------------------------- +++// FpuStackSim +++//-------------------------------------------------------- +++ +++// No FPU stack on RISCV ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_FpuStackSim_riscv.hpp ++@@ -0,0 +1,32 @@ +++/* +++ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_C1_FPUSTACKSIM_RISCV_HPP +++#define CPU_RISCV_C1_FPUSTACKSIM_RISCV_HPP +++ +++// No FPU stack on RISCV +++class FpuStackSim; +++ +++#endif // CPU_RISCV_C1_FPUSTACKSIM_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_FrameMap_riscv.cpp ++@@ -0,0 +1,388 @@ +++/* +++ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "c1/c1_FrameMap.hpp" +++#include "c1/c1_LIR.hpp" +++#include "runtime/sharedRuntime.hpp" +++#include "vmreg_riscv.inline.hpp" +++ +++LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) { +++ LIR_Opr opr = LIR_OprFact::illegalOpr; +++ VMReg r_1 = reg->first(); +++ VMReg r_2 = reg->second(); +++ if (r_1->is_stack()) { +++ // Convert stack slot to an SP offset +++ // The calling convention does not count the SharedRuntime::out_preserve_stack_slots() value +++ // so we must add it in here. +++ int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; +++ opr = LIR_OprFact::address(new LIR_Address(sp_opr, st_off, type)); +++ } else if (r_1->is_Register()) { +++ Register reg1 = r_1->as_Register(); +++ if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) { +++ Register reg2 = r_2->as_Register(); +++ assert(reg2 == reg1, "must be same register"); +++ opr = as_long_opr(reg1); +++ } else if (is_reference_type(type)) { +++ opr = as_oop_opr(reg1); +++ } else if (type == T_METADATA) { +++ opr = as_metadata_opr(reg1); +++ } else if (type == T_ADDRESS) { +++ opr = as_address_opr(reg1); +++ } else { +++ opr = as_opr(reg1); +++ } +++ } else if (r_1->is_FloatRegister()) { +++ assert(type == T_DOUBLE || type == T_FLOAT, "wrong type"); +++ int num = r_1->as_FloatRegister()->encoding(); +++ if (type == T_FLOAT) { +++ opr = LIR_OprFact::single_fpu(num); +++ } else { +++ opr = LIR_OprFact::double_fpu(num); +++ } +++ } else { +++ ShouldNotReachHere(); +++ } +++ return opr; +++} +++ +++LIR_Opr FrameMap::zr_opr; +++LIR_Opr FrameMap::r1_opr; +++LIR_Opr FrameMap::r2_opr; +++LIR_Opr FrameMap::r3_opr; +++LIR_Opr FrameMap::r4_opr; +++LIR_Opr FrameMap::r5_opr; +++LIR_Opr FrameMap::r6_opr; +++LIR_Opr FrameMap::r7_opr; +++LIR_Opr FrameMap::r8_opr; +++LIR_Opr FrameMap::r9_opr; +++LIR_Opr FrameMap::r10_opr; +++LIR_Opr FrameMap::r11_opr; +++LIR_Opr FrameMap::r12_opr; +++LIR_Opr FrameMap::r13_opr; +++LIR_Opr FrameMap::r14_opr; +++LIR_Opr FrameMap::r15_opr; +++LIR_Opr FrameMap::r16_opr; +++LIR_Opr FrameMap::r17_opr; +++LIR_Opr FrameMap::r18_opr; +++LIR_Opr FrameMap::r19_opr; +++LIR_Opr FrameMap::r20_opr; +++LIR_Opr FrameMap::r21_opr; +++LIR_Opr FrameMap::r22_opr; +++LIR_Opr FrameMap::r23_opr; +++LIR_Opr FrameMap::r24_opr; +++LIR_Opr FrameMap::r25_opr; +++LIR_Opr FrameMap::r26_opr; +++LIR_Opr FrameMap::r27_opr; +++LIR_Opr FrameMap::r28_opr; +++LIR_Opr FrameMap::r29_opr; +++LIR_Opr FrameMap::r30_opr; +++LIR_Opr FrameMap::r31_opr; +++ +++LIR_Opr FrameMap::fp_opr; +++LIR_Opr FrameMap::sp_opr; +++ +++LIR_Opr FrameMap::receiver_opr; +++ +++LIR_Opr FrameMap::zr_oop_opr; +++LIR_Opr FrameMap::r1_oop_opr; +++LIR_Opr FrameMap::r2_oop_opr; +++LIR_Opr FrameMap::r3_oop_opr; +++LIR_Opr FrameMap::r4_oop_opr; +++LIR_Opr FrameMap::r5_oop_opr; +++LIR_Opr FrameMap::r6_oop_opr; +++LIR_Opr FrameMap::r7_oop_opr; +++LIR_Opr FrameMap::r8_oop_opr; +++LIR_Opr FrameMap::r9_oop_opr; +++LIR_Opr FrameMap::r10_oop_opr; +++LIR_Opr FrameMap::r11_oop_opr; +++LIR_Opr FrameMap::r12_oop_opr; +++LIR_Opr FrameMap::r13_oop_opr; +++LIR_Opr FrameMap::r14_oop_opr; +++LIR_Opr FrameMap::r15_oop_opr; +++LIR_Opr FrameMap::r16_oop_opr; +++LIR_Opr FrameMap::r17_oop_opr; +++LIR_Opr FrameMap::r18_oop_opr; +++LIR_Opr FrameMap::r19_oop_opr; +++LIR_Opr FrameMap::r20_oop_opr; 
+++LIR_Opr FrameMap::r21_oop_opr; +++LIR_Opr FrameMap::r22_oop_opr; +++LIR_Opr FrameMap::r23_oop_opr; +++LIR_Opr FrameMap::r24_oop_opr; +++LIR_Opr FrameMap::r25_oop_opr; +++LIR_Opr FrameMap::r26_oop_opr; +++LIR_Opr FrameMap::r27_oop_opr; +++LIR_Opr FrameMap::r28_oop_opr; +++LIR_Opr FrameMap::r29_oop_opr; +++LIR_Opr FrameMap::r30_oop_opr; +++LIR_Opr FrameMap::r31_oop_opr; +++ +++LIR_Opr FrameMap::t0_opr; +++LIR_Opr FrameMap::t1_opr; +++LIR_Opr FrameMap::t0_long_opr; +++LIR_Opr FrameMap::t1_long_opr; +++ +++LIR_Opr FrameMap::r10_metadata_opr; +++LIR_Opr FrameMap::r11_metadata_opr; +++LIR_Opr FrameMap::r12_metadata_opr; +++LIR_Opr FrameMap::r13_metadata_opr; +++LIR_Opr FrameMap::r14_metadata_opr; +++LIR_Opr FrameMap::r15_metadata_opr; +++ +++LIR_Opr FrameMap::long10_opr; +++LIR_Opr FrameMap::long11_opr; +++LIR_Opr FrameMap::fpu10_float_opr; +++LIR_Opr FrameMap::fpu10_double_opr; +++ +++LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, }; +++LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, }; +++ +++//-------------------------------------------------------- +++// FrameMap +++//-------------------------------------------------------- +++// |---f31--| +++// |---..---| +++// |---f28--| +++// |---f27--|<---pd_last_callee_saved_fpu_reg_2 +++// |---..---| +++// |---f18--|<---pd_first_callee_saved_fpu_reg_2 +++// |---f17--| +++// |---..---| +++// |---f10--| +++// |---f9---|<---pd_last_callee_saved_fpu_reg_1 +++// |---f8---|<---pd_first_callee_saved_fpu_reg_1 +++// |---f7---| +++// |---..---| +++// |---f0---| +++// |---x27--| +++// |---x23--| +++// |---x8---| +++// |---x4---| +++// |---x3---| +++// |---x2---| +++// |---x1---| +++// |---x0---| +++// |---x26--|<---pd_last_callee_saved_reg +++// |---..---| +++// |---x18--| +++// |---x9---|<---pd_first_callee_saved_reg +++// |---x31--| +++// |---..---| +++// |---x28--| +++// |---x17--| +++// |---..---| +++// |---x10--| +++// |---x7---| +++ +++void FrameMap::initialize() { +++ assert(!_init_done, "once"); +++ +++ int i = 0; +++ +++ // caller save register +++ map_register(i, x7); r7_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x10); r10_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x11); r11_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x12); r12_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x13); r13_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x14); r14_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x15); r15_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x16); r16_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x17); r17_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x28); r28_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x29); r29_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x30); r30_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x31); r31_opr = LIR_OprFact::single_cpu(i); i++; +++ +++ // callee save register +++ map_register(i, x9); r9_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x18); r18_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x19); r19_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x20); r20_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x21); r21_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x22); r22_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x24); r24_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x25); r25_opr = LIR_OprFact::single_cpu(i); i++; +++ map_register(i, x26); r26_opr = 
LIR_OprFact::single_cpu(i); i++; +++ +++ // special register +++ map_register(i, x0); zr_opr = LIR_OprFact::single_cpu(i); i++; // zr +++ map_register(i, x1); r1_opr = LIR_OprFact::single_cpu(i); i++; // ra +++ map_register(i, x2); r2_opr = LIR_OprFact::single_cpu(i); i++; // sp +++ map_register(i, x3); r3_opr = LIR_OprFact::single_cpu(i); i++; // gp +++ map_register(i, x4); r4_opr = LIR_OprFact::single_cpu(i); i++; // thread +++ map_register(i, x8); r8_opr = LIR_OprFact::single_cpu(i); i++; // fp +++ map_register(i, x23); r23_opr = LIR_OprFact::single_cpu(i); i++; // java thread +++ map_register(i, x27); r27_opr = LIR_OprFact::single_cpu(i); i++; // heapbase +++ +++ // tmp register +++ map_register(i, x5); r5_opr = LIR_OprFact::single_cpu(i); i++; // t0 +++ map_register(i, x6); r6_opr = LIR_OprFact::single_cpu(i); i++; // t1 +++ +++ t0_opr = r5_opr; +++ t1_opr = r6_opr; +++ t0_long_opr = LIR_OprFact::double_cpu(r5_opr->cpu_regnr(), r5_opr->cpu_regnr()); +++ t1_long_opr = LIR_OprFact::double_cpu(r6_opr->cpu_regnr(), r6_opr->cpu_regnr()); +++ +++ long10_opr = LIR_OprFact::double_cpu(r10_opr->cpu_regnr(), r10_opr->cpu_regnr()); +++ long11_opr = LIR_OprFact::double_cpu(r11_opr->cpu_regnr(), r11_opr->cpu_regnr()); +++ +++ fpu10_float_opr = LIR_OprFact::single_fpu(10); +++ fpu10_double_opr = LIR_OprFact::double_fpu(10); +++ +++ i = 0; +++ _caller_save_cpu_regs[i++] = r7_opr; +++ _caller_save_cpu_regs[i++] = r10_opr; +++ _caller_save_cpu_regs[i++] = r11_opr; +++ _caller_save_cpu_regs[i++] = r12_opr; +++ _caller_save_cpu_regs[i++] = r13_opr; +++ _caller_save_cpu_regs[i++] = r14_opr; +++ _caller_save_cpu_regs[i++] = r15_opr; +++ _caller_save_cpu_regs[i++] = r16_opr; +++ _caller_save_cpu_regs[i++] = r17_opr; +++ _caller_save_cpu_regs[i++] = r28_opr; +++ _caller_save_cpu_regs[i++] = r29_opr; +++ _caller_save_cpu_regs[i++] = r30_opr; +++ _caller_save_cpu_regs[i++] = r31_opr; +++ +++ _init_done = true; +++ +++ zr_oop_opr = as_oop_opr(x0); +++ r1_oop_opr = as_oop_opr(x1); +++ r2_oop_opr = as_oop_opr(x2); +++ r3_oop_opr = as_oop_opr(x3); +++ r4_oop_opr = as_oop_opr(x4); +++ r5_oop_opr = as_oop_opr(x5); +++ r6_oop_opr = as_oop_opr(x6); +++ r7_oop_opr = as_oop_opr(x7); +++ r8_oop_opr = as_oop_opr(x8); +++ r9_oop_opr = as_oop_opr(x9); +++ r10_oop_opr = as_oop_opr(x10); +++ r11_oop_opr = as_oop_opr(x11); +++ r12_oop_opr = as_oop_opr(x12); +++ r13_oop_opr = as_oop_opr(x13); +++ r14_oop_opr = as_oop_opr(x14); +++ r15_oop_opr = as_oop_opr(x15); +++ r16_oop_opr = as_oop_opr(x16); +++ r17_oop_opr = as_oop_opr(x17); +++ r18_oop_opr = as_oop_opr(x18); +++ r19_oop_opr = as_oop_opr(x19); +++ r20_oop_opr = as_oop_opr(x20); +++ r21_oop_opr = as_oop_opr(x21); +++ r22_oop_opr = as_oop_opr(x22); +++ r23_oop_opr = as_oop_opr(x23); +++ r24_oop_opr = as_oop_opr(x24); +++ r25_oop_opr = as_oop_opr(x25); +++ r26_oop_opr = as_oop_opr(x26); +++ r27_oop_opr = as_oop_opr(x27); +++ r28_oop_opr = as_oop_opr(x28); +++ r29_oop_opr = as_oop_opr(x29); +++ r30_oop_opr = as_oop_opr(x30); +++ r31_oop_opr = as_oop_opr(x31); +++ +++ r10_metadata_opr = as_metadata_opr(x10); +++ r11_metadata_opr = as_metadata_opr(x11); +++ r12_metadata_opr = as_metadata_opr(x12); +++ r13_metadata_opr = as_metadata_opr(x13); +++ r14_metadata_opr = as_metadata_opr(x14); +++ r15_metadata_opr = as_metadata_opr(x15); +++ +++ sp_opr = as_pointer_opr(sp); +++ fp_opr = as_pointer_opr(fp); +++ +++ VMRegPair regs; +++ BasicType sig_bt = T_OBJECT; +++ SharedRuntime::java_calling_convention(&sig_bt, ®s, 1); +++ receiver_opr = as_oop_opr(regs.first()->as_Register()); +++ 
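+++  // All FP registers are treated as caller-saved here (see
+++  // pd_nof_caller_save_fpu_regs_frame_map = 32 in c1_Defs_riscv.hpp).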
+++  for (i = 0; i < nof_caller_save_fpu_regs; i++) {
+++    _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
+++  }
+++}
+++
+++
+++Address FrameMap::make_new_address(ByteSize sp_offset) const {
+++  return Address(sp, in_bytes(sp_offset));
+++}
+++
+++
+++// ----------------mapping-----------------------
+++// all mapping is based on fp addressing, except for simple leaf methods where we access
+++// the locals sp based (and no frame is built)
+++
+++
+++// Frame for simple leaf methods (quick entries)
+++//
+++//   +----------+
+++//   | ret addr |   <- TOS
+++//   +----------+
+++//   | args     |
+++//   | ......   |
+++
+++// Frame for standard methods
+++//
+++//   | .........|  <- TOS
+++//   | locals   |
+++//   +----------+
+++//   | old fp,  |
+++//   +----------+
+++//   | ret addr |
+++//   +----------+
+++//   | args     |  <- FP
+++//   | .........|
+++
+++
+++// For OopMaps, map a local variable or spill index to a VMRegImpl name.
+++// This is the offset from sp() in the frame of the slot for the index,
+++// skewed by VMRegImpl::stack0 to indicate a stack location (vs. a register.)
+++//
+++//   framesize +
+++//   stack0          stack0            0  <- VMReg
+++//     |               |               |
+++//  ...........|..............|.............|
+++//    0 1 2 3  x x  4 5 6 ... |               <- local indices
+++//    ^           ^          sp()               ( x x indicate link
+++//    |           |                               and return addr)
+++//  arguments   non-argument locals
+++
+++
+++VMReg FrameMap::fpu_regname (int n) {
+++  // Return the OptoReg name for the fpu stack slot "n"
+++  // A spilled fpu stack slot comprises two single-word OptoReg's.
+++  return as_FloatRegister(n)->as_VMReg();
+++}
+++
+++LIR_Opr FrameMap::stack_pointer() {
+++  return FrameMap::sp_opr;
+++}
+++
+++// JSR 292
+++LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
+++  return LIR_OprFact::illegalOpr;  // Not needed on riscv
+++}
+++
+++bool FrameMap::validate_frame() {
+++  return true;
+++}
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/c1_FrameMap_riscv.hpp
++@@ -0,0 +1,148 @@
+++/*
+++ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef CPU_RISCV_C1_FRAMEMAP_RISCV_HPP
+++#define CPU_RISCV_C1_FRAMEMAP_RISCV_HPP
+++
+++// On RISCV the frame looks as follows:
+++//
+++// +-----------------------------+---------+----------------------------------------+----------------+-----------
+++// | size_arguments-nof_reg_args | 2 words | size_locals-size_arguments+numreg_args | _size_monitors | spilling .
+++// +-----------------------------+---------+----------------------------------------+----------------+----------- +++ +++ public: +++ static const int pd_c_runtime_reserved_arg_size; +++ +++ enum { +++ first_available_sp_in_frame = 0, +++ frame_pad_in_bytes = 16, +++ nof_reg_args = 8 +++ }; +++ +++ public: +++ static LIR_Opr receiver_opr; +++ +++ static LIR_Opr zr_opr; +++ static LIR_Opr r1_opr; +++ static LIR_Opr r2_opr; +++ static LIR_Opr r3_opr; +++ static LIR_Opr r4_opr; +++ static LIR_Opr r5_opr; +++ static LIR_Opr r6_opr; +++ static LIR_Opr r7_opr; +++ static LIR_Opr r8_opr; +++ static LIR_Opr r9_opr; +++ static LIR_Opr r10_opr; +++ static LIR_Opr r11_opr; +++ static LIR_Opr r12_opr; +++ static LIR_Opr r13_opr; +++ static LIR_Opr r14_opr; +++ static LIR_Opr r15_opr; +++ static LIR_Opr r16_opr; +++ static LIR_Opr r17_opr; +++ static LIR_Opr r18_opr; +++ static LIR_Opr r19_opr; +++ static LIR_Opr r20_opr; +++ static LIR_Opr r21_opr; +++ static LIR_Opr r22_opr; +++ static LIR_Opr r23_opr; +++ static LIR_Opr r24_opr; +++ static LIR_Opr r25_opr; +++ static LIR_Opr r26_opr; +++ static LIR_Opr r27_opr; +++ static LIR_Opr r28_opr; +++ static LIR_Opr r29_opr; +++ static LIR_Opr r30_opr; +++ static LIR_Opr r31_opr; +++ static LIR_Opr fp_opr; +++ static LIR_Opr sp_opr; +++ +++ static LIR_Opr zr_oop_opr; +++ static LIR_Opr r1_oop_opr; +++ static LIR_Opr r2_oop_opr; +++ static LIR_Opr r3_oop_opr; +++ static LIR_Opr r4_oop_opr; +++ static LIR_Opr r5_oop_opr; +++ static LIR_Opr r6_oop_opr; +++ static LIR_Opr r7_oop_opr; +++ static LIR_Opr r8_oop_opr; +++ static LIR_Opr r9_oop_opr; +++ static LIR_Opr r10_oop_opr; +++ static LIR_Opr r11_oop_opr; +++ static LIR_Opr r12_oop_opr; +++ static LIR_Opr r13_oop_opr; +++ static LIR_Opr r14_oop_opr; +++ static LIR_Opr r15_oop_opr; +++ static LIR_Opr r16_oop_opr; +++ static LIR_Opr r17_oop_opr; +++ static LIR_Opr r18_oop_opr; +++ static LIR_Opr r19_oop_opr; +++ static LIR_Opr r20_oop_opr; +++ static LIR_Opr r21_oop_opr; +++ static LIR_Opr r22_oop_opr; +++ static LIR_Opr r23_oop_opr; +++ static LIR_Opr r24_oop_opr; +++ static LIR_Opr r25_oop_opr; +++ static LIR_Opr r26_oop_opr; +++ static LIR_Opr r27_oop_opr; +++ static LIR_Opr r28_oop_opr; +++ static LIR_Opr r29_oop_opr; +++ static LIR_Opr r30_oop_opr; +++ static LIR_Opr r31_oop_opr; +++ +++ static LIR_Opr t0_opr; +++ static LIR_Opr t1_opr; +++ static LIR_Opr t0_long_opr; +++ static LIR_Opr t1_long_opr; +++ +++ static LIR_Opr r10_metadata_opr; +++ static LIR_Opr r11_metadata_opr; +++ static LIR_Opr r12_metadata_opr; +++ static LIR_Opr r13_metadata_opr; +++ static LIR_Opr r14_metadata_opr; +++ static LIR_Opr r15_metadata_opr; +++ +++ static LIR_Opr long10_opr; +++ static LIR_Opr long11_opr; +++ static LIR_Opr fpu10_float_opr; +++ static LIR_Opr fpu10_double_opr; +++ +++ static LIR_Opr as_long_opr(Register r) { +++ return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r)); +++ } +++ static LIR_Opr as_pointer_opr(Register r) { +++ return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r)); +++ } +++ +++ // VMReg name for spilled physical FPU stack slot n +++ static VMReg fpu_regname(int n); +++ +++ static bool is_caller_save_register(LIR_Opr opr) { return true; } +++ static bool is_caller_save_register(Register r) { return true; } +++ +++ static int nof_caller_save_cpu_regs() { return pd_nof_caller_save_cpu_regs_frame_map; } +++ static int last_cpu_reg() { return pd_last_cpu_reg; } +++ +++#endif // CPU_RISCV_C1_FRAMEMAP_RISCV_HPP ++--- /dev/null +++++ 
b/src/hotspot/cpu/riscv/c1_LIRAssembler_arith_riscv.cpp
++@@ -0,0 +1,281 @@
+++/*
+++ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#include "precompiled.hpp"
+++#include "asm/assembler.hpp"
+++#include "c1/c1_LIRAssembler.hpp"
+++#include "c1/c1_MacroAssembler.hpp"
+++
+++#ifndef PRODUCT
+++#define COMMENT(x)   do { __ block_comment(x); } while (0)
+++#else
+++#define COMMENT(x)
+++#endif
+++
+++#define __ _masm->
+++
+++void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr illegal,
+++                                    LIR_Opr result, CodeEmitInfo* info) {
+++  // opcode check
+++  assert((code == lir_idiv) || (code == lir_irem), "opcode must be idiv or irem");
+++  bool is_irem = (code == lir_irem);
+++  // operand check
+++  assert(left->is_single_cpu(), "left must be a register");
+++  assert(right->is_single_cpu() || right->is_constant(), "right must be a register or constant");
+++  assert(result->is_single_cpu(), "result must be a register");
+++  Register lreg = left->as_register();
+++  Register dreg = result->as_register();
+++
+++  // power-of-2 constant check and codegen
+++  if (right->is_constant()) {
+++    int c = right->as_constant_ptr()->as_jint();
+++    assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
+++    if (is_irem) {
+++      if (c == 1) {
+++        // move 0 to dreg if divisor is 1
+++        __ mv(dreg, zr);
+++      } else {
+++        unsigned int shift = exact_log2(c);
+++        // t0 = (2^shift - 1) if the dividend is negative, 0 otherwise:
+++        // the bias that makes the masking below round toward zero
+++        __ sraiw(t0, lreg, 0x1f);
+++        __ srliw(t0, t0, BitsPerInt - shift);
+++        __ addw(t1, lreg, t0);
+++        if (Assembler::is_simm12(c - 1)) {
+++          __ andi(t1, t1, c - 1);
+++        } else {
+++          __ zero_extend(t1, t1, shift);
+++        }
+++        __ subw(dreg, t1, t0);
+++      }
+++    } else {
+++      if (c == 1) {
+++        // move lreg to dreg if divisor is 1
+++        __ mv(dreg, lreg);
+++      } else {
+++        unsigned int shift = exact_log2(c);
+++        // add (c - 1) to negative dividends so that the arithmetic
+++        // shift below rounds the quotient toward zero
+++        __ sraiw(t0, lreg, 0x1f);
+++        if (Assembler::is_simm12(c - 1)) {
+++          __ andi(t0, t0, c - 1);
+++        } else {
+++          __ zero_extend(t0, t0, shift);
+++        }
+++        __ addw(dreg, t0, lreg);
+++        __ sraiw(dreg, dreg, shift);
+++      }
+++    }
+++  } else {
+++    Register rreg = right->as_register();
+++    __ corrected_idivl(dreg, lreg, rreg, is_irem);
+++  }
+++}
+++
+++void LIR_Assembler::arith_op_single_cpu_right_constant(LIR_Code code, LIR_Opr left, LIR_Opr right,
+++                                                       Register lreg, Register dreg) {
+++  // cpu register - constant
+++  jlong c;
+++
+++  switch (right->type()) {
+++    case T_LONG:
+++      c = right->as_constant_ptr()->as_jlong(); break;
+++    case T_INT:      // fall through
+++    case T_ADDRESS:
+++      c = right->as_constant_ptr()->as_jint(); break;
+++    default:
+++      ShouldNotReachHere();
+++      c = 0;  // unreachable
+++  }
+++
+++  assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
+++  if (c == 0 && dreg == lreg) {
+++    COMMENT("effective nop elided");
+++    return;
+++  }
+++  switch (left->type()) {
+++    case T_INT:
+++      switch (code) {
+++        case lir_add: __ addw(dreg, lreg, c); break;
+++        case lir_sub: __ subw(dreg, lreg, c); break;
+++        default:      ShouldNotReachHere();
+++      }
+++      break;
+++    case T_OBJECT:   // fall through
+++    case T_ADDRESS:
+++      switch (code) {
+++        case lir_add: __ add(dreg, lreg, c); break;
+++        case lir_sub: __ sub(dreg, lreg, c); break;
+++        default:      ShouldNotReachHere();
+++      }
+++      break;
+++    default:
+++      ShouldNotReachHere();
+++  }
+++}
+++
+++void LIR_Assembler::arith_op_single_cpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
+++  Register lreg = left->as_register();
+++  Register dreg = as_reg(dest);
+++
+++  if (right->is_single_cpu()) {
+++    // cpu register - cpu register
+++    assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT, "should be");
+++    Register rreg = right->as_register();
+++    switch (code) {
+++      case lir_add: __ addw(dest->as_register(), lreg, rreg); break;
+++      case lir_sub: __ subw(dest->as_register(), lreg, rreg); break;
+++      case lir_mul: __ mulw(dest->as_register(), lreg, rreg); break;
+++      default:      ShouldNotReachHere();
+++    }
+++  } else if (right->is_double_cpu()) {
+++    Register rreg = right->as_register_lo();
+++    // single_cpu + double_cpu; can happen with obj_long
+++    assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
+++    switch (code) {
+++      case lir_add: __ add(dreg, lreg, rreg); break;
+++      case lir_sub: __ sub(dreg, lreg, rreg); break;
+++      default:      ShouldNotReachHere();
+++    }
+++  } else if (right->is_constant()) {
+++    arith_op_single_cpu_right_constant(code, left, right, lreg, dreg);
+++  } else {
+++    ShouldNotReachHere();
+++  }
+++}
+++
+++void LIR_Assembler::arith_op_double_cpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
+++  Register lreg_lo = left->as_register_lo();
+++
+++  if (right->is_double_cpu()) {
+++    // cpu register - cpu register
+++    Register rreg_lo = right->as_register_lo();
+++    switch (code) {
+++      case lir_add: __ add(dest->as_register_lo(), lreg_lo, rreg_lo); break;
+++      case lir_sub: __ sub(dest->as_register_lo(), lreg_lo, rreg_lo); break;
+++      case lir_mul: __ mul(dest->as_register_lo(), lreg_lo, rreg_lo); break;
+++      case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false); break;
+++      case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true); break;
+++      default:
+++        ShouldNotReachHere();
+++    }
+++  } else if (right->is_constant()) {
+++    jlong c = right->as_constant_ptr()->as_jlong();
+++    Register dreg = as_reg(dest);
+++    switch (code) {
+++      case lir_add: // fall through
+++      case lir_sub:
+++        if (c == 0 && dreg == lreg_lo) {
+++          COMMENT("effective nop elided");
+++          return;
+++        }
+++        code == lir_add ?
__ add(dreg, lreg_lo, c) : __ sub(dreg, lreg_lo, c); +++ break; +++ case lir_div: +++ assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant"); +++ if (c == 1) { +++ // move lreg_lo to dreg if divisor is 1 +++ __ mv(dreg, lreg_lo); +++ } else { +++ unsigned int shift = exact_log2_long(c); +++ // use t0 as intermediate result register +++ __ srai(t0, lreg_lo, 0x3f); +++ if (Assembler::is_simm12(c - 1)) { +++ __ andi(t0, t0, c - 1); +++ } else { +++ __ zero_extend(t0, t0, shift); +++ } +++ __ add(dreg, t0, lreg_lo); +++ __ srai(dreg, dreg, shift); +++ } +++ break; +++ case lir_rem: +++ assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant"); +++ if (c == 1) { +++ // move 0 to dreg if divisor is 1 +++ __ mv(dreg, zr); +++ } else { +++ unsigned int shift = exact_log2_long(c); +++ __ srai(t0, lreg_lo, 0x3f); +++ __ srli(t0, t0, BitsPerLong - shift); +++ __ add(t1, lreg_lo, t0); +++ if (Assembler::is_simm12(c - 1)) { +++ __ andi(t1, t1, c - 1); +++ } else { +++ __ zero_extend(t1, t1, shift); +++ } +++ __ sub(dreg, t1, t0); +++ } +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ } else { +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::arith_op_single_fpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { +++ assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register"); +++ switch (code) { +++ case lir_add: __ fadd_s(dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break; +++ case lir_sub: __ fsub_s(dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break; +++ case lir_mul: __ fmul_s(dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break; +++ case lir_div: __ fdiv_s(dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break; +++ default: +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::arith_op_double_fpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { +++ if (right->is_double_fpu()) { +++ // fpu register - fpu register +++ switch (code) { +++ case lir_add: __ fadd_d(dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break; +++ case lir_sub: __ fsub_d(dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break; +++ case lir_mul: __ fmul_d(dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break; +++ case lir_div: __ fdiv_d(dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break; +++ default: +++ ShouldNotReachHere(); +++ } +++ } else { +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, +++ CodeEmitInfo* info, bool pop_fpu_stack) { +++ assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); +++ +++ if (left->is_single_cpu()) { +++ arith_op_single_cpu(code, left, right, dest); +++ } else if (left->is_double_cpu()) { +++ arith_op_double_cpu(code, left, right, dest); +++ } else if (left->is_single_fpu()) { +++ arith_op_single_fpu(code, left, right, dest); +++ } else if (left->is_double_fpu()) { +++ arith_op_double_fpu(code, left, right, dest); +++ } else { +++ ShouldNotReachHere(); +++ } +++} +++ +++#undef __ ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_arith_riscv.hpp ++@@ -0,0 +1,37 @@ +++/* +++ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. 
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_C1_LIRASSEMBLER_ARITH_RISCV_HPP +++#define CPU_RISCV_C1_LIRASSEMBLER_ARITH_RISCV_HPP +++ +++ // arith_op sub functions +++ void arith_op_single_cpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest); +++ void arith_op_double_cpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest); +++ void arith_op_single_fpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest); +++ void arith_op_double_fpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest); +++ void arith_op_single_cpu_right_constant(LIR_Code code, LIR_Opr left, LIR_Opr right, Register lreg, Register dreg); +++ void arithmetic_idiv(LIR_Op3* op, bool is_irem); +++ +++#endif // CPU_RISCV_C1_LIRASSEMBLER_ARITH_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp ++@@ -0,0 +1,387 @@ +++/* +++ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/assembler.hpp" +++#include "c1/c1_LIRAssembler.hpp" +++#include "c1/c1_MacroAssembler.hpp" +++#include "ci/ciArrayKlass.hpp" +++#include "oops/objArrayKlass.hpp" +++#include "runtime/stubRoutines.hpp" +++ +++#define __ _masm-> +++ +++ +++void LIR_Assembler::generic_arraycopy(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos, CodeStub *stub) { +++ assert(src == x11 && src_pos == x12, "mismatch in calling convention"); +++ // Save the arguments in case the generic arraycopy fails and we +++ // have to fall back to the JNI stub +++ arraycopy_store_args(src, src_pos, length, dst, dst_pos); +++ +++ address copyfunc_addr = StubRoutines::generic_arraycopy(); +++ assert(copyfunc_addr != NULL, "generic arraycopy stub required"); +++ +++ // The arguments are in java calling convention so we shift them +++ // to C convention +++ assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4); +++ __ mv(c_rarg0, j_rarg0); +++ assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4); +++ __ mv(c_rarg1, j_rarg1); +++ assert_different_registers(c_rarg2, j_rarg3, j_rarg4); +++ __ mv(c_rarg2, j_rarg2); +++ assert_different_registers(c_rarg3, j_rarg4); +++ __ mv(c_rarg3, j_rarg3); +++ __ mv(c_rarg4, j_rarg4); +++#ifndef PRODUCT +++ if (PrintC1Statistics) { +++ __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); +++ } +++#endif +++ __ far_call(RuntimeAddress(copyfunc_addr)); +++ __ beqz(x10, *stub->continuation()); +++ // Reload values from the stack so they are where the stub +++ // expects them. +++ arraycopy_load_args(src, src_pos, length, dst, dst_pos); +++ +++ // x10 is -1^K where K == partial copied count +++ __ xori(t0, x10, -1); +++ // adjust length down and src/end pos up by partial copied count +++ __ subw(length, length, t0); +++ __ addw(src_pos, src_pos, t0); +++ __ addw(dst_pos, dst_pos, t0); +++ __ j(*stub->entry()); +++ +++ __ bind(*stub->continuation()); +++} +++ +++void LIR_Assembler::arraycopy_simple_check(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos, Register tmp, +++ CodeStub *stub, int flags) { +++ // test for NULL +++ if (flags & LIR_OpArrayCopy::src_null_check) { +++ __ beqz(src, *stub->entry(), /* is_far */ true); +++ } +++ if (flags & LIR_OpArrayCopy::dst_null_check) { +++ __ beqz(dst, *stub->entry(), /* is_far */ true); +++ } +++ +++ // If the compiler was not able to prove that exact type of the source or the destination +++ // of the arraycopy is an array type, check at runtime if the source or the destination is +++ // an instance type. 
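+++  // Klass::layout_helper() is negative for array klasses and
+++  // non-negative for instance klasses, so comparing it against
+++  // _lh_neutral_value (0) with bgez routes non-arrays to the slow stub.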
+++  if (flags & LIR_OpArrayCopy::type_check) {
+++    assert(Klass::_lh_neutral_value == 0, "or replace bgez instructions");
+++    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
+++      __ load_klass(tmp, dst);
+++      __ lw(t0, Address(tmp, in_bytes(Klass::layout_helper_offset())));
+++      __ bgez(t0, *stub->entry(), /* is_far */ true);
+++    }
+++
+++    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
+++      __ load_klass(tmp, src);
+++      __ lw(t0, Address(tmp, in_bytes(Klass::layout_helper_offset())));
+++      __ bgez(t0, *stub->entry(), /* is_far */ true);
+++    }
+++  }
+++
+++  // check if negative
+++  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
+++    __ bltz(src_pos, *stub->entry(), /* is_far */ true);
+++  }
+++  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
+++    __ bltz(dst_pos, *stub->entry(), /* is_far */ true);
+++  }
+++  if (flags & LIR_OpArrayCopy::length_positive_check) {
+++    __ bltz(length, *stub->entry(), /* is_far */ true);
+++  }
+++
+++  if (flags & LIR_OpArrayCopy::src_range_check) {
+++    __ addw(tmp, src_pos, length);
+++    __ lwu(t0, Address(src, arrayOopDesc::length_offset_in_bytes()));
+++    __ bgtu(tmp, t0, *stub->entry(), /* is_far */ true);
+++  }
+++  if (flags & LIR_OpArrayCopy::dst_range_check) {
+++    __ addw(tmp, dst_pos, length);
+++    __ lwu(t0, Address(dst, arrayOopDesc::length_offset_in_bytes()));
+++    __ bgtu(tmp, t0, *stub->entry(), /* is_far */ true);
+++  }
+++}
+++
+++void LIR_Assembler::arraycopy_checkcast(Register src, Register src_pos, Register length,
+++                                        Register dst, Register dst_pos, Register tmp,
+++                                        CodeStub *stub, BasicType basic_type,
+++                                        address copyfunc_addr, int flags) {
+++  // src is not a sub class of dst so we have to do a
+++  // per-element check.
+++  int mask = LIR_OpArrayCopy::src_objarray | LIR_OpArrayCopy::dst_objarray;
+++  if ((flags & mask) != mask) {
+++    // Check that both of them are object arrays; the one whose type is
+++    // not known statically is checked here at runtime.
+++    assert(flags & mask, "one of the two should be known to be an object array");
+++
+++    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
+++      __ load_klass(tmp, src);
+++    } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
+++      __ load_klass(tmp, dst);
+++    }
+++    int lh_offset = in_bytes(Klass::layout_helper_offset());
+++    Address klass_lh_addr(tmp, lh_offset);
+++    jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
+++    __ lw(t0, klass_lh_addr);
+++    __ mv(t1, objArray_lh);
+++    __ bne(t0, t1, *stub->entry(), /* is_far */ true);
+++  }
+++
+++  // Spill because stubs can use any register they like and it's
+++  // easier to restore just those that we care about.
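+++  // The five arguments occupy the first five stack words
+++  // (sp + 0 .. sp + 4 * BytesPerWord); arraycopy_store_args() and
+++  // arraycopy_load_args() must agree on that layout.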
+++ arraycopy_store_args(src, src_pos, length, dst, dst_pos); +++ arraycopy_checkcast_prepare_params(src, src_pos, length, dst, dst_pos, basic_type); +++ __ far_call(RuntimeAddress(copyfunc_addr)); +++ +++#ifndef PRODUCT +++ if (PrintC1Statistics) { +++ Label failed; +++ __ bnez(x10, failed); +++ __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt)); +++ __ bind(failed); +++ } +++#endif +++ +++ __ beqz(x10, *stub->continuation()); +++ +++#ifndef PRODUCT +++ if (PrintC1Statistics) { +++ __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt)); +++ } +++#endif +++ assert_different_registers(dst, dst_pos, length, src_pos, src, x10, t0); +++ +++ // Restore previously spilled arguments +++ arraycopy_load_args(src, src_pos, length, dst, dst_pos); +++ +++ // return value is -1^K where K is partial copied count +++ __ xori(t0, x10, -1); +++ // adjust length down and src/end pos up by partial copied count +++ __ subw(length, length, t0); +++ __ addw(src_pos, src_pos, t0); +++ __ addw(dst_pos, dst_pos, t0); +++} +++ +++void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos, Register tmp, +++ CodeStub *stub, BasicType basic_type, int flags) { +++ // We don't know the array types are compatible +++ if (basic_type != T_OBJECT) { +++ // Simple test for basic type arrays +++ if (UseCompressedClassPointers) { +++ __ lwu(tmp, Address(src, oopDesc::klass_offset_in_bytes())); +++ __ lwu(t0, Address(dst, oopDesc::klass_offset_in_bytes())); +++ } else { +++ __ ld(tmp, Address(src, oopDesc::klass_offset_in_bytes())); +++ __ ld(t0, Address(dst, oopDesc::klass_offset_in_bytes())); +++ } +++ __ bne(tmp, t0, *stub->entry(), /* is_far */ true); +++ } else { +++ // For object arrays, if src is a sub class of dst then we can +++ // safely do the copy. +++ Label cont, slow; +++ +++#define PUSH(r1, r2) \ +++ __ addi(sp, sp, -2 * wordSize); \ +++ __ sd(r1, Address(sp, 1 * wordSize)); \ +++ __ sd(r2, Address(sp, 0)); +++ +++#define POP(r1, r2) \ +++ __ ld(r1, Address(sp, 1 * wordSize)); \ +++ __ ld(r2, Address(sp, 0)); \ +++ __ addi(sp, sp, 2 * wordSize); +++ +++ PUSH(src, dst); +++ __ load_klass(src, src); +++ __ load_klass(dst, dst); +++ __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL); +++ +++ PUSH(src, dst); +++ __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); +++ POP(src, dst); +++ __ bnez(dst, cont); +++ +++ __ bind(slow); +++ POP(src, dst); +++ +++ address copyfunc_addr = StubRoutines::checkcast_arraycopy(); +++ if (copyfunc_addr != NULL) { // use stub if available +++ arraycopy_checkcast(src, src_pos, length, dst, dst_pos, tmp, stub, basic_type, copyfunc_addr, flags); +++ } +++ +++ __ j(*stub->entry()); +++ __ bind(cont); +++ POP(src, dst); +++ } +++} +++ +++void LIR_Assembler::arraycopy_assert(Register src, Register dst, Register tmp, ciArrayKlass *default_type, int flags) { +++ assert(default_type != NULL, "NULL default_type!"); +++ BasicType basic_type = default_type->element_type()->basic_type(); +++ +++ if (basic_type == T_ARRAY) { basic_type = T_OBJECT; } +++ if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { +++ // Sanity check the known type with the incoming class. For the +++ // primitive case the types must match exactly with src.klass and +++ // dst.klass each exactly matching the default type. 
For the +++ // object array case, if no type check is needed then either the +++ // dst type is exactly the expected type and the src type is a +++ // subtype which we can't check or src is the same array as dst +++ // but not necessarily exactly of type default_type. +++ Label known_ok, halt; +++ __ mov_metadata(tmp, default_type->constant_encoding()); +++ if (UseCompressedClassPointers) { +++ __ encode_klass_not_null(tmp); +++ } +++ +++ if (basic_type != T_OBJECT) { +++ if (UseCompressedClassPointers) { +++ __ lwu(t0, Address(dst, oopDesc::klass_offset_in_bytes())); +++ } else { +++ __ ld(t0, Address(dst, oopDesc::klass_offset_in_bytes())); +++ } +++ __ bne(tmp, t0, halt); +++ if (UseCompressedClassPointers) { +++ __ lwu(t0, Address(src, oopDesc::klass_offset_in_bytes())); +++ } else { +++ __ ld(t0, Address(src, oopDesc::klass_offset_in_bytes())); +++ } +++ __ beq(tmp, t0, known_ok); +++ } else { +++ if (UseCompressedClassPointers) { +++ __ lwu(t0, Address(dst, oopDesc::klass_offset_in_bytes())); +++ } else { +++ __ ld(t0, Address(dst, oopDesc::klass_offset_in_bytes())); +++ } +++ __ beq(tmp, t0, known_ok); +++ __ beq(src, dst, known_ok); +++ } +++ __ bind(halt); +++ __ stop("incorrect type information in arraycopy"); +++ __ bind(known_ok); +++ } +++} +++ +++void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { +++ ciArrayKlass *default_type = op->expected_type(); +++ Register src = op->src()->as_register(); +++ Register dst = op->dst()->as_register(); +++ Register src_pos = op->src_pos()->as_register(); +++ Register dst_pos = op->dst_pos()->as_register(); +++ Register length = op->length()->as_register(); +++ Register tmp = op->tmp()->as_register(); +++ +++ CodeStub* stub = op->stub(); +++ int flags = op->flags(); +++ BasicType basic_type = default_type != NULL ? 
default_type->element_type()->basic_type() : T_ILLEGAL; +++ if (is_reference_type(basic_type)) { basic_type = T_OBJECT; } +++ +++ // if we don't know anything, just go through the generic arraycopy +++ if (default_type == NULL) { +++ generic_arraycopy(src, src_pos, length, dst, dst_pos, stub); +++ return; +++ } +++ +++ assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), +++ "must be true at this point"); +++ +++ arraycopy_simple_check(src, src_pos, length, dst, dst_pos, tmp, stub, flags); +++ +++ if (flags & LIR_OpArrayCopy::type_check) { +++ arraycopy_type_check(src, src_pos, length, dst, dst_pos, tmp, stub, basic_type, flags); +++ } +++ +++#ifdef ASSERT +++ arraycopy_assert(src, dst, tmp, default_type, flags); +++#endif +++ +++#ifndef PRODUCT +++ if (PrintC1Statistics) { +++ __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type))); +++ } +++#endif +++ arraycopy_prepare_params(src, src_pos, length, dst, dst_pos, basic_type); +++ +++ bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; +++ bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; +++ const char *name = NULL; +++ address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); +++ +++ CodeBlob *cb = CodeCache::find_blob(entry); +++ if (cb != NULL) { +++ __ far_call(RuntimeAddress(entry)); +++ } else { +++ const int args_num = 3; +++ __ call_VM_leaf(entry, args_num); +++ } +++ +++ __ bind(*stub->continuation()); +++} +++ +++ +++void LIR_Assembler::arraycopy_prepare_params(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos, BasicType basic_type) { +++ int scale = array_element_size(basic_type); +++ __ shadd(c_rarg0, src_pos, src, t0, scale); +++ __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type)); +++ assert_different_registers(c_rarg0, dst, dst_pos, length); +++ __ shadd(c_rarg1, dst_pos, dst, t0, scale); +++ __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type)); +++ assert_different_registers(c_rarg1, dst, length); +++ __ mv(c_rarg2, length); +++ assert_different_registers(c_rarg2, dst); +++} +++ +++void LIR_Assembler::arraycopy_checkcast_prepare_params(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos, BasicType basic_type) { +++ arraycopy_prepare_params(src, src_pos, length, dst, dst_pos, basic_type); +++ __ load_klass(c_rarg4, dst); +++ __ ld(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset())); +++ __ lwu(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset())); +++} +++ +++void LIR_Assembler::arraycopy_store_args(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos) { +++ __ sd(dst_pos, Address(sp, 0)); // 0: dst_pos sp offset +++ __ sd(dst, Address(sp, 1 * BytesPerWord)); // 1: dst sp offset +++ __ sd(length, Address(sp, 2 * BytesPerWord)); // 2: length sp offset +++ __ sd(src_pos, Address(sp, 3 * BytesPerWord)); // 3: src_pos sp offset +++ __ sd(src, Address(sp, 4 * BytesPerWord)); // 4: src sp offset +++} +++ +++void LIR_Assembler::arraycopy_load_args(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos) { +++ __ ld(dst_pos, Address(sp, 0)); // 0: dst_pos sp offset +++ __ ld(dst, Address(sp, 1 * BytesPerWord)); // 1: dst sp offset +++ __ ld(length, Address(sp, 2 * BytesPerWord)); // 2: length sp offset +++ __ ld(src_pos, Address(sp, 3 * BytesPerWord)); // 3: src_pos sp offset +++ __ ld(src, Address(sp, 4 * BytesPerWord)); // 4: 
src sp offset +++} +++ +++#undef __ ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.hpp ++@@ -0,0 +1,52 @@ +++/* +++ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_C1_LIRASSEMBLER_ARRAYCOPY_RISCV_HPP +++#define CPU_RISCV_C1_LIRASSEMBLER_ARRAYCOPY_RISCV_HPP +++ +++ // arraycopy sub functions +++ void generic_arraycopy(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos, CodeStub *stub); +++ void arraycopy_simple_check(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos, Register tmp, +++ CodeStub *stub, int flags); +++ void arraycopy_checkcast(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos, Register tmp, +++ CodeStub *stub, BasicType basic_type, +++ address copyfunc_addr, int flags); +++ void arraycopy_type_check(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos, Register tmp, +++ CodeStub *stub, BasicType basic_type, int flags); +++ void arraycopy_assert(Register src, Register dst, Register tmp, ciArrayKlass *default_type, int flags); +++ void arraycopy_prepare_params(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos, BasicType basic_type); +++ void arraycopy_checkcast_prepare_params(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos, BasicType basic_type); +++ void arraycopy_store_args(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos); +++ void arraycopy_load_args(Register src, Register src_pos, Register length, +++ Register dst, Register dst_pos); +++ +++#endif // CPU_RISCV_C1_LIRASSEMBLER_ARRAYCOPY_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp ++@@ -0,0 +1,2254 @@ +++/* +++ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. 
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#include "precompiled.hpp"
+++#include "asm/assembler.hpp"
+++#include "asm/macroAssembler.inline.hpp"
+++#include "c1/c1_CodeStubs.hpp"
+++#include "c1/c1_Compilation.hpp"
+++#include "c1/c1_LIRAssembler.hpp"
+++#include "c1/c1_MacroAssembler.hpp"
+++#include "c1/c1_Runtime1.hpp"
+++#include "c1/c1_ValueStack.hpp"
+++#include "ci/ciArrayKlass.hpp"
+++#include "ci/ciInstance.hpp"
+++#include "code/compiledIC.hpp"
+++#include "gc/shared/collectedHeap.hpp"
+++#include "nativeInst_riscv.hpp"
+++#include "oops/objArrayKlass.hpp"
+++#include "runtime/frame.inline.hpp"
+++#include "runtime/sharedRuntime.hpp"
+++#include "utilities/powerOfTwo.hpp"
+++#include "vmreg_riscv.inline.hpp"
+++
+++#ifndef PRODUCT
+++#define COMMENT(x)   do { __ block_comment(x); } while (0)
+++#else
+++#define COMMENT(x)
+++#endif
+++
+++NEEDS_CLEANUP // remove these definitions?
+++const Register IC_Klass    = t1;    // where the IC klass is cached
+++const Register SYNC_header = x10;   // synchronization header
+++const Register SHIFT_count = x10;   // where count for shift operations must be
+++
+++#define __ _masm->
+++
+++static void select_different_registers(Register preserve,
+++                                       Register extra,
+++                                       Register &tmp1,
+++                                       Register &tmp2) {
+++  if (tmp1 == preserve) {
+++    assert_different_registers(tmp1, tmp2, extra);
+++    tmp1 = extra;
+++  } else if (tmp2 == preserve) {
+++    assert_different_registers(tmp1, tmp2, extra);
+++    tmp2 = extra;
+++  }
+++  assert_different_registers(preserve, tmp1, tmp2);
+++}
+++
+++static void select_different_registers(Register preserve,
+++                                       Register extra,
+++                                       Register &tmp1,
+++                                       Register &tmp2,
+++                                       Register &tmp3) {
+++  if (tmp1 == preserve) {
+++    assert_different_registers(tmp1, tmp2, tmp3, extra);
+++    tmp1 = extra;
+++  } else if (tmp2 == preserve) {
+++    assert_different_registers(tmp1, tmp2, tmp3, extra);
+++    tmp2 = extra;
+++  } else if (tmp3 == preserve) {
+++    assert_different_registers(tmp1, tmp2, tmp3, extra);
+++    tmp3 = extra;
+++  }
+++  assert_different_registers(preserve, tmp1, tmp2, tmp3);
+++}
+++
+++bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }
+++
+++void LIR_Assembler::clinit_barrier(ciMethod* method) {
+++  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+++  assert(!method->holder()->is_not_initialized(), "initialization should have been started");
+++
+++  Label L_skip_barrier;
+++
+++  __ mov_metadata(t1, method->holder()->constant_encoding());
+++  __ clinit_barrier(t1, t0, &L_skip_barrier /* L_fast_path */);
+++  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+++  __ bind(L_skip_barrier);
+++}
+++
+++LIR_Opr LIR_Assembler::receiverOpr() {
+++  return FrameMap::receiver_opr;
+++}
+++
+++LIR_Opr LIR_Assembler::osrBufferPointer() {
+++  return
FrameMap::as_pointer_opr(receiverOpr()->as_register()); +++} +++ +++void LIR_Assembler::breakpoint() { Unimplemented(); } +++ +++void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); } +++ +++void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); } +++ +++static jlong as_long(LIR_Opr data) { +++ jlong result; +++ switch (data->type()) { +++ case T_INT: +++ result = (data->as_jint()); +++ break; +++ case T_LONG: +++ result = (data->as_jlong()); +++ break; +++ default: +++ ShouldNotReachHere(); +++ result = 0; // unreachable +++ } +++ return result; +++} +++ +++Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) { +++ if (addr->base()->is_illegal()) { +++ assert(addr->index()->is_illegal(), "must be illegal too"); +++ __ movptr(tmp, addr->disp()); +++ return Address(tmp, 0); +++ } +++ +++ Register base = addr->base()->as_pointer_register(); +++ LIR_Opr index_opr = addr->index(); +++ +++ if (index_opr->is_illegal()) { +++ return Address(base, addr->disp()); +++ } +++ +++ int scale = addr->scale(); +++ if (index_opr->is_cpu_register()) { +++ Register index; +++ if (index_opr->is_single_cpu()) { +++ index = index_opr->as_register(); +++ } else { +++ index = index_opr->as_register_lo(); +++ } +++ if (scale != 0) { +++ __ shadd(tmp, index, base, tmp, scale); +++ } else { +++ __ add(tmp, base, index); +++ } +++ return Address(tmp, addr->disp()); +++ } else if (index_opr->is_constant()) { +++ intptr_t addr_offset = (((intptr_t)index_opr->as_constant_ptr()->as_jint()) << scale) + addr->disp(); +++ return Address(base, addr_offset); +++ } +++ +++ Unimplemented(); +++ return Address(); +++} +++ +++Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { +++ ShouldNotReachHere(); +++ return Address(); +++} +++ +++Address LIR_Assembler::as_Address(LIR_Address* addr) { +++ return as_Address(addr, t0); +++} +++ +++Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { +++ return as_Address(addr); +++} +++ +++// Ensure a valid Address (base + offset) to a stack-slot. If stack access is +++// not encodable as a base + (immediate) offset, generate an explicit address +++// calculation to hold the address in t0. 
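+++// (RISC-V load/store instructions take a 12-bit signed immediate, so
+++// frame offsets outside roughly +/-2 KiB cannot be encoded directly.)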
+++Address LIR_Assembler::stack_slot_address(int index, uint size, int adjust) {
+++  precond(size == 4 || size == 8);
+++  Address addr = frame_map()->address_for_slot(index, adjust);
+++  precond(addr.getMode() == Address::base_plus_offset);
+++  precond(addr.base() == sp);
+++  precond(addr.offset() > 0);
+++  uint mask = size - 1;
+++  assert((addr.offset() & mask) == 0, "scaled offsets only");
+++
+++  return addr;
+++}
+++
+++void LIR_Assembler::osr_entry() {
+++  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
+++  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
+++  guarantee(osr_entry != NULL, "NULL osr_entry!");
+++  ValueStack* entry_state = osr_entry->state();
+++  int number_of_locks = entry_state->locks_size();
+++
+++  // we jump here if osr happens with the interpreter
+++  // state set up to continue at the beginning of the
+++  // loop that triggered osr - in particular, we have
+++  // the following registers setup:
+++  //
+++  // x12: osr buffer
+++  //
+++
+++  // build frame
+++  ciMethod* m = compilation()->method();
+++  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
+++
+++  // OSR buffer is
+++  //
+++  // locals[nlocals-1..0]
+++  // monitors[0..number_of_locks]
+++  //
+++  // locals[] is a direct copy of the interpreter frame, so the first slot
+++  // in the OSR buffer's local array is the last local from the interpreter
+++  // and the last slot is local[0] (the receiver) from the interpreter
+++  //
+++  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
+++  // from the interpreter frame, and the nth lock slot in the osr buffer is the
+++  // 0th lock in the interpreter frame (the method lock if a sync method)
+++
+++  // Initialize monitors in the compiled activation.
+++  //   x12: pointer to osr buffer
+++  // All other registers are dead at this point and the locals will be
+++  // copied into place by code emitted in the IR.
+++
+++  Register OSR_buf = osrBufferPointer()->as_pointer_register();
+++  {
+++    assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
+++    int monitor_offset = BytesPerWord * method()->max_locals() +
+++                         (2 * BytesPerWord) * (number_of_locks - 1);
+++    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
+++    // the OSR buffer using 2 word entries: first the lock and then
+++    // the oop.
+++    for (int i = 0; i < number_of_locks; i++) {
+++      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
+++#ifdef ASSERT
+++      // verify the interpreter's monitor has a non-null object
+++      {
+++        Label L;
+++        __ ld(t0, Address(OSR_buf, slot_offset + 1 * BytesPerWord));
+++        __ bnez(t0, L);
+++        __ stop("locked object is NULL");
+++        __ bind(L);
+++      }
+++#endif // ASSERT
+++      __ ld(x9, Address(OSR_buf, slot_offset + 0));
+++      __ sd(x9, frame_map()->address_for_monitor_lock(i));
+++      __ ld(x9, Address(OSR_buf, slot_offset + 1 * BytesPerWord));
+++      __ sd(x9, frame_map()->address_for_monitor_object(i));
+++    }
+++  }
+++}
+++
+++// inline cache check; done before the frame is built.
+++int LIR_Assembler::check_icache() {
+++  Register receiver = FrameMap::receiver_opr->as_register();
+++  Register ic_klass = IC_Klass;
+++  int start_offset = __ offset();
+++  Label dont;
+++  __ inline_cache_check(receiver, ic_klass, dont);
+++
+++  // if icache check fails, then jump to runtime routine
+++  // Note: RECEIVER must still contain the receiver!
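+++  // The IC miss stub re-resolves the call site to the correct target,
+++  // so control does not return here on a miss.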
+++ __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); +++ +++ // We align the verified entry point unless the method body +++ // (including its inline cache check) will fit in a single 64-byte +++ // icache line. +++ if (!method()->is_accessor() || __ offset() - start_offset > 4 * 4) { +++ // force alignment after the cache check. +++ __ align(CodeEntryAlignment); +++ } +++ +++ __ bind(dont); +++ return start_offset; +++} +++ +++void LIR_Assembler::jobject2reg(jobject o, Register reg) { +++ if (o == NULL) { +++ __ mv(reg, zr); +++ } else { +++ __ movoop(reg, o, /* immediate */ true); +++ } +++} +++ +++void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) { +++ deoptimize_trap(info); +++} +++ +++// This specifies the rsp decrement needed to build the frame +++int LIR_Assembler::initial_frame_size_in_bytes() const { +++ // if rounding, must let FrameMap know! +++ +++ return in_bytes(frame_map()->framesize_in_bytes()); +++} +++ +++int LIR_Assembler::emit_exception_handler() { +++ // if the last instruction is a call (typically to do a throw which +++ // is coming at the end after block reordering) the return address +++ // must still point into the code area in order to avoid assertion +++ // failures when searching for the corresponding bci ==> add a nop +++ // (was bug 5/14/1999 -gri) +++ __ nop(); +++ +++ // generate code for exception handler +++ address handler_base = __ start_a_stub(exception_handler_size()); +++ if (handler_base == NULL) { +++ // not enough space left for the handler +++ bailout("exception handler overflow"); +++ return -1; +++ } +++ +++ int offset = code_offset(); +++ +++ // the exception oop and pc are in x10, and x13 +++ // no other registers need to be preserved, so invalidate them +++ __ invalidate_registers(false, true, true, false, true, true); +++ +++ // check that there is really an exception +++ __ verify_not_null_oop(x10); +++ +++ // search an exception handler (x10: exception oop, x13: throwing pc) +++ __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id))); +++ __ should_not_reach_here(); +++ guarantee(code_offset() - offset <= exception_handler_size(), "overflow"); +++ __ end_a_stub(); +++ +++ return offset; +++} +++ +++// Emit the code to remove the frame from the stack in the exception +++// unwind path. 
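+++// On this path any held monitor is released and dtrace is notified
+++// before control transfers to Runtime1::unwind_exception_id.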
+++int LIR_Assembler::emit_unwind_handler() {
+++#ifndef PRODUCT
+++  if (CommentedAssembly) {
+++    _masm->block_comment("Unwind handler");
+++  }
+++#endif // PRODUCT
+++
+++  int offset = code_offset();
+++
+++  // Fetch the exception from TLS and clear out exception-related thread state
+++  __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
+++  __ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));
+++  __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
+++
+++  __ bind(_unwind_handler_entry);
+++  __ verify_not_null_oop(x10);
+++  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
+++    __ mv(x9, x10);  // Preserve the exception
+++  }
+++
+++  // Perform needed unlocking
+++  MonitorExitStub* stub = NULL;
+++  if (method()->is_synchronized()) {
+++    monitor_address(0, FrameMap::r10_opr);
+++    stub = new MonitorExitStub(FrameMap::r10_opr, true, 0);
+++    __ unlock_object(x15, x14, x10, *stub->entry());
+++    __ bind(*stub->continuation());
+++  }
+++
+++  if (compilation()->env()->dtrace_method_probes()) {
+++    __ mv(c_rarg0, xthread);
+++    __ mov_metadata(c_rarg1, method()->constant_encoding());
+++    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
+++  }
+++
+++  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
+++    __ mv(x10, x9);  // Restore the exception
+++  }
+++
+++  // remove the activation and dispatch to the unwind handler
+++  __ block_comment("remove_frame and dispatch to the unwind handler");
+++  __ remove_frame(initial_frame_size_in_bytes());
+++  __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
+++
+++  // Emit the slow path assembly
+++  if (stub != NULL) {
+++    stub->emit_code(this);
+++  }
+++
+++  return offset;
+++}
+++
+++int LIR_Assembler::emit_deopt_handler() {
+++  // if the last instruction is a call (typically to do a throw which
+++  // is coming at the end after block reordering) the return address
+++  // must still point into the code area in order to avoid assertion
+++  // failures when searching for the corresponding bci ==> add a nop
+++  // (was bug 5/14/1999 - gri)
+++  __ nop();
+++
+++  // generate code for deopt handler
+++  address handler_base = __ start_a_stub(deopt_handler_size());
+++  if (handler_base == NULL) {
+++    // not enough space left for the handler
+++    bailout("deopt handler overflow");
+++    return -1;
+++  }
+++
+++  int offset = code_offset();
+++
+++  __ auipc(ra, 0);
+++  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+++  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
+++  __ end_a_stub();
+++
+++  return offset;
+++}
+++
+++void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
+++  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == x10, "word returns are in x10");
+++
+++  // Pop the stack before the safepoint code
+++  __ remove_frame(initial_frame_size_in_bytes());
+++
+++  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
+++    __ reserved_stack_check();
+++  }
+++
+++  code_stub->set_safepoint_offset(__ offset());
+++  __ relocate(relocInfo::poll_return_type);
+++  __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
+++  __ ret();
+++}
+++
+++int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
+++  guarantee(info != NULL, "Shouldn't be NULL");
+++  __ get_polling_page(t0, relocInfo::poll_type);
+++  // t0 now holds the thread-local polling word address; the read below
+++  // faults when a safepoint or handshake is pending.
+++  add_debug_info_for_branch(info);  // This isn't just debug info:
+++                                    // it's the oop map
+++  __ read_polling_page(t0, 0, relocInfo::poll_type);
+++  return __ offset();
+++}
+++
+++void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
+++  __ mv(to_reg, from_reg);
+++}
+++
+++void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
+++
+++void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
+++  assert(src->is_constant(), "should not call otherwise");
+++  assert(dest->is_register(), "should not call otherwise");
+++  LIR_Const* c = src->as_constant_ptr();
+++  address const_addr = NULL;
+++
+++  switch (c->type()) {
+++    case T_INT:
+++      assert(patch_code == lir_patch_none, "no patching handled here");
+++      __ mv(dest->as_register(), c->as_jint());
+++      break;
+++
+++    case T_ADDRESS:
+++      assert(patch_code == lir_patch_none, "no patching handled here");
+++      __ mv(dest->as_register(), c->as_jint());
+++      break;
+++
+++    case T_LONG:
+++      assert(patch_code == lir_patch_none, "no patching handled here");
+++      __ mv(dest->as_register_lo(), (intptr_t)c->as_jlong());
+++      break;
+++
+++    case T_OBJECT:
+++    case T_ARRAY:
+++      if (patch_code == lir_patch_none) {
+++        jobject2reg(c->as_jobject(), dest->as_register());
+++      } else {
+++        jobject2reg_with_patching(dest->as_register(), info);
+++      }
+++      break;
+++
+++    case T_METADATA:
+++      if (patch_code != lir_patch_none) {
+++        klass2reg_with_patching(dest->as_register(), info);
+++      } else {
+++        __ mov_metadata(dest->as_register(), c->as_metadata());
+++      }
+++      break;
+++
+++    case T_FLOAT:
+++      const_addr = float_constant(c->as_jfloat());
+++      assert(const_addr != NULL, "must create float constant in the constant table");
+++      __ flw(dest->as_float_reg(), InternalAddress(const_addr));
+++      break;
+++
+++    case T_DOUBLE:
+++      const_addr = double_constant(c->as_jdouble());
+++      assert(const_addr != NULL, "must create double constant in the constant table");
+++      __ fld(dest->as_double_reg(), InternalAddress(const_addr));
+++      break;
+++
+++    default:
+++      ShouldNotReachHere();
+++  }
+++}
+++
+++void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
+++  assert(src->is_constant(), "should not call otherwise");
+++  assert(dest->is_stack(), "should not call otherwise");
+++  LIR_Const* c = src->as_constant_ptr();
+++  switch (c->type()) {
+++    case T_OBJECT:
+++      if (c->as_jobject() == NULL) {
+++        __ sd(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
+++      } else {
+++        const2reg(src, FrameMap::t1_opr, lir_patch_none, NULL);
+++        reg2stack(FrameMap::t1_opr, dest, c->type(), false);
+++      }
+++      break;
+++    case T_ADDRESS:
+++      const2reg(src, FrameMap::t1_opr, lir_patch_none, NULL);
+++      reg2stack(FrameMap::t1_opr, dest, c->type(), false);
+++      break;
+++    case T_INT:     // fall through
+++    case T_FLOAT:
+++      if (c->as_jint_bits() == 0) {
+++        __ sw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
+++      } else {
+++        __ mv(t1, c->as_jint_bits());
+++        __ sw(t1, frame_map()->address_for_slot(dest->single_stack_ix()));
+++      }
+++      break;
+++    case T_LONG:    // fall through
+++    case T_DOUBLE:
+++      if (c->as_jlong_bits() == 0) {
+++        __ sd(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
+++                                                lo_word_offset_in_bytes));
+++      } else {
+++        __ mv(t1, (intptr_t)c->as_jlong_bits());
+++        __ sd(t1, frame_map()->address_for_slot(dest->double_stack_ix(),
+++                                                lo_word_offset_in_bytes));
+++      }
+++      break;
+++    default:
+++      ShouldNotReachHere();
+++  }
+++}
+++
+++void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest,
BasicType type, CodeEmitInfo* info, bool wide) { +++ assert(src->is_constant(), "should not call otherwise"); +++ assert(dest->is_address(), "should not call otherwise"); +++ LIR_Const* c = src->as_constant_ptr(); +++ LIR_Address* to_addr = dest->as_address_ptr(); +++ void (MacroAssembler::* insn)(Register Rt, const Address &adr, Register temp); +++ switch (type) { +++ case T_ADDRESS: +++ assert(c->as_jint() == 0, "should be"); +++ insn = &MacroAssembler::sd; break; +++ case T_LONG: +++ assert(c->as_jlong() == 0, "should be"); +++ insn = &MacroAssembler::sd; break; +++ case T_DOUBLE: +++ assert(c->as_jdouble() == 0.0, "should be"); +++ insn = &MacroAssembler::sd; break; +++ case T_INT: +++ assert(c->as_jint() == 0, "should be"); +++ insn = &MacroAssembler::sw; break; +++ case T_FLOAT: +++ assert(c->as_jfloat() == 0.0f, "should be"); +++ insn = &MacroAssembler::sw; break; +++ case T_OBJECT: // fall through +++ case T_ARRAY: +++ assert(c->as_jobject() == 0, "should be"); +++ if (UseCompressedOops && !wide) { +++ insn = &MacroAssembler::sw; +++ } else { +++ insn = &MacroAssembler::sd; +++ } +++ break; +++ case T_CHAR: // fall through +++ case T_SHORT: +++ assert(c->as_jint() == 0, "should be"); +++ insn = &MacroAssembler::sh; +++ break; +++ case T_BOOLEAN: // fall through +++ case T_BYTE: +++ assert(c->as_jint() == 0, "should be"); +++ insn = &MacroAssembler::sb; break; +++ default: +++ ShouldNotReachHere(); +++ insn = &MacroAssembler::sd; // unreachable +++ } +++ if (info != NULL) { +++ add_debug_info_for_null_check_here(info); +++ } +++ (_masm->*insn)(zr, as_Address(to_addr), t0); +++} +++ +++void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) { +++ assert(src->is_register(), "should not call otherwise"); +++ assert(dest->is_register(), "should not call otherwise"); +++ +++ // move between cpu-registers +++ if (dest->is_single_cpu()) { +++ if (src->type() == T_LONG) { +++ // Can do LONG -> OBJECT +++ move_regs(src->as_register_lo(), dest->as_register()); +++ return; +++ } +++ assert(src->is_single_cpu(), "must match"); +++ if (src->type() == T_OBJECT) { +++ __ verify_oop(src->as_register()); +++ } +++ move_regs(src->as_register(), dest->as_register()); +++ } else if (dest->is_double_cpu()) { +++ if (is_reference_type(src->type())) { +++ __ verify_oop(src->as_register()); +++ move_regs(src->as_register(), dest->as_register_lo()); +++ return; +++ } +++ assert(src->is_double_cpu(), "must match"); +++ Register f_lo = src->as_register_lo(); +++ Register f_hi = src->as_register_hi(); +++ Register t_lo = dest->as_register_lo(); +++ Register t_hi = dest->as_register_hi(); +++ assert(f_hi == f_lo, "must be same"); +++ assert(t_hi == t_lo, "must be same"); +++ move_regs(f_lo, t_lo); +++ } else if (dest->is_single_fpu()) { +++ assert(src->is_single_fpu(), "expect single fpu"); +++ __ fmv_s(dest->as_float_reg(), src->as_float_reg()); +++ } else if (dest->is_double_fpu()) { +++ assert(src->is_double_fpu(), "expect double fpu"); +++ __ fmv_d(dest->as_double_reg(), src->as_double_reg()); +++ } else { +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { +++ precond(src->is_register() && dest->is_stack()); +++ +++ uint const c_sz32 = sizeof(uint32_t); +++ uint const c_sz64 = sizeof(uint64_t); +++ +++ assert(src->is_register(), "should not call otherwise"); +++ assert(dest->is_stack(), "should not call otherwise"); +++ if (src->is_single_cpu()) { +++ int index = dest->single_stack_ix(); +++ if (is_reference_type(type)) { 
+++ __ sd(src->as_register(), stack_slot_address(index, c_sz64)); +++ __ verify_oop(src->as_register()); +++ } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) { +++ __ sd(src->as_register(), stack_slot_address(index, c_sz64)); +++ } else { +++ __ sw(src->as_register(), stack_slot_address(index, c_sz32)); +++ } +++ } else if (src->is_double_cpu()) { +++ int index = dest->double_stack_ix(); +++ Address dest_addr_LO = stack_slot_address(index, c_sz64, lo_word_offset_in_bytes); +++ __ sd(src->as_register_lo(), dest_addr_LO); +++ } else if (src->is_single_fpu()) { +++ int index = dest->single_stack_ix(); +++ __ fsw(src->as_float_reg(), stack_slot_address(index, c_sz32)); +++ } else if (src->is_double_fpu()) { +++ int index = dest->double_stack_ix(); +++ __ fsd(src->as_double_reg(), stack_slot_address(index, c_sz64)); +++ } else { +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) { +++ LIR_Address* to_addr = dest->as_address_ptr(); +++ // t0 was used as tmp reg in as_Address, so we use t1 as compressed_src +++ Register compressed_src = t1; +++ +++ if (patch_code != lir_patch_none) { +++ deoptimize_trap(info); +++ return; +++ } +++ +++ if (is_reference_type(type)) { +++ __ verify_oop(src->as_register()); +++ +++ if (UseCompressedOops && !wide) { +++ __ encode_heap_oop(compressed_src, src->as_register()); +++ } else { +++ compressed_src = src->as_register(); +++ } +++ } +++ +++ int null_check_here = code_offset(); +++ +++ switch (type) { +++ case T_FLOAT: +++ __ fsw(src->as_float_reg(), as_Address(to_addr)); +++ break; +++ +++ case T_DOUBLE: +++ __ fsd(src->as_double_reg(), as_Address(to_addr)); +++ break; +++ +++ case T_ARRAY: // fall through +++ case T_OBJECT: +++ if (UseCompressedOops && !wide) { +++ __ sw(compressed_src, as_Address(to_addr)); +++ } else { +++ __ sd(compressed_src, as_Address(to_addr)); +++ } +++ break; +++ case T_METADATA: +++ // We get here to store a method pointer to the stack to pass to +++ // a dtrace runtime call. This can't work on 64 bit with +++ // compressed klass ptrs: T_METADATA can be compressed klass +++ // ptr or a 64 bit method pointer. 
+++ ShouldNotReachHere(); +++ __ sd(src->as_register(), as_Address(to_addr)); +++ break; +++ case T_ADDRESS: +++ __ sd(src->as_register(), as_Address(to_addr)); +++ break; +++ case T_INT: +++ __ sw(src->as_register(), as_Address(to_addr)); +++ break; +++ case T_LONG: +++ __ sd(src->as_register_lo(), as_Address(to_addr)); +++ break; +++ case T_BYTE: // fall through +++ case T_BOOLEAN: +++ __ sb(src->as_register(), as_Address(to_addr)); +++ break; +++ case T_CHAR: // fall through +++ case T_SHORT: +++ __ sh(src->as_register(), as_Address(to_addr)); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ +++ if (info != NULL) { +++ add_debug_info_for_null_check(null_check_here, info); +++ } +++} +++ +++void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { +++ precond(src->is_stack() && dest->is_register()); +++ +++ uint const c_sz32 = sizeof(uint32_t); +++ uint const c_sz64 = sizeof(uint64_t); +++ +++ if (dest->is_single_cpu()) { +++ int index = src->single_stack_ix(); +++ if (type == T_INT) { +++ __ lw(dest->as_register(), stack_slot_address(index, c_sz32)); +++ } else if (is_reference_type(type)) { +++ __ ld(dest->as_register(), stack_slot_address(index, c_sz64)); +++ __ verify_oop(dest->as_register()); +++ } else if (type == T_METADATA || type == T_ADDRESS) { +++ __ ld(dest->as_register(), stack_slot_address(index, c_sz64)); +++ } else { +++ __ lwu(dest->as_register(), stack_slot_address(index, c_sz32)); +++ } +++ } else if (dest->is_double_cpu()) { +++ int index = src->double_stack_ix(); +++ Address src_addr_LO = stack_slot_address(index, c_sz64, lo_word_offset_in_bytes); +++ __ ld(dest->as_register_lo(), src_addr_LO); +++ } else if (dest->is_single_fpu()) { +++ int index = src->single_stack_ix(); +++ __ flw(dest->as_float_reg(), stack_slot_address(index, c_sz32)); +++ } else if (dest->is_double_fpu()) { +++ int index = src->double_stack_ix(); +++ __ fld(dest->as_double_reg(), stack_slot_address(index, c_sz64)); +++ } else { +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) { +++ deoptimize_trap(info); +++} +++ +++void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { +++ LIR_Opr temp; +++ if (type == T_LONG || type == T_DOUBLE) { +++ temp = FrameMap::t1_long_opr; +++ } else { +++ temp = FrameMap::t1_opr; +++ } +++ +++ stack2reg(src, temp, src->type()); +++ reg2stack(temp, dest, dest->type(), false); +++} +++ +++void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) { +++ assert(src->is_address(), "should not call otherwise"); +++ assert(dest->is_register(), "should not call otherwise"); +++ +++ LIR_Address* addr = src->as_address_ptr(); +++ LIR_Address* from_addr = src->as_address_ptr(); +++ +++ if (addr->base()->type() == T_OBJECT) { +++ __ verify_oop(addr->base()->as_pointer_register()); +++ } +++ +++ if (patch_code != lir_patch_none) { +++ deoptimize_trap(info); +++ return; +++ } +++ +++ if (info != NULL) { +++ add_debug_info_for_null_check_here(info); +++ } +++ +++ int null_check_here = code_offset(); +++ switch (type) { +++ case T_FLOAT: +++ __ flw(dest->as_float_reg(), as_Address(from_addr)); +++ break; +++ case T_DOUBLE: +++ __ fld(dest->as_double_reg(), as_Address(from_addr)); +++ break; +++ case T_ARRAY: // fall through +++ case T_OBJECT: +++ if (UseCompressedOops && !wide) { +++ __ lwu(dest->as_register(), as_Address(from_addr)); +++ } else { +++ __ 
ld(dest->as_register(), as_Address(from_addr)); +++ } +++ break; +++ case T_METADATA: +++ // We get here to store a method pointer to the stack to pass to +++ // a dtrace runtime call. This can't work on 64 bit with +++ // compressed klass ptrs: T_METADATA can be a compressed klass +++ // ptr or a 64 bit method pointer. +++ ShouldNotReachHere(); +++ __ ld(dest->as_register(), as_Address(from_addr)); +++ break; +++ case T_ADDRESS: +++ if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) { +++ __ lwu(dest->as_register(), as_Address(from_addr)); +++ } else { +++ __ ld(dest->as_register(), as_Address(from_addr)); +++ } +++ break; +++ case T_INT: +++ __ lw(dest->as_register(), as_Address(from_addr)); +++ break; +++ case T_LONG: +++ __ ld(dest->as_register_lo(), as_Address_lo(from_addr)); +++ break; +++ case T_BYTE: +++ __ lb(dest->as_register(), as_Address(from_addr)); +++ break; +++ case T_BOOLEAN: +++ __ lbu(dest->as_register(), as_Address(from_addr)); +++ break; +++ case T_CHAR: +++ __ lhu(dest->as_register(), as_Address(from_addr)); +++ break; +++ case T_SHORT: +++ __ lh(dest->as_register(), as_Address(from_addr)); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ +++ if (is_reference_type(type)) { +++ if (UseCompressedOops && !wide) { +++ __ decode_heap_oop(dest->as_register()); +++ } +++ +++ if (!UseZGC) { +++ // Load barrier has not yet been applied, so ZGC can't verify the oop here +++ __ verify_oop(dest->as_register()); +++ } +++ } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) { +++ if (UseCompressedClassPointers) { +++ __ decode_klass_not_null(dest->as_register()); +++ } +++ } +++} +++ +++void LIR_Assembler::emit_op3(LIR_Op3* op) { +++ switch (op->code()) { +++ case lir_idiv: // fall through +++ case lir_irem: +++ arithmetic_idiv(op->code(), +++ op->in_opr1(), +++ op->in_opr2(), +++ op->in_opr3(), +++ op->result_opr(), +++ op->info()); +++ break; +++ case lir_fmad: +++ __ fmadd_d(op->result_opr()->as_double_reg(), +++ op->in_opr1()->as_double_reg(), +++ op->in_opr2()->as_double_reg(), +++ op->in_opr3()->as_double_reg()); +++ break; +++ case lir_fmaf: +++ __ fmadd_s(op->result_opr()->as_float_reg(), +++ op->in_opr1()->as_float_reg(), +++ op->in_opr2()->as_float_reg(), +++ op->in_opr3()->as_float_reg()); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type, +++ LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) { +++ Label label; +++ +++ emit_branch(condition, cmp_opr1, cmp_opr2, label, /* is_far */ false, +++ /* is_unordered */ (condition == lir_cond_greaterEqual || condition == lir_cond_greater) ? 
false : true); +++ +++ Label done; +++ move_op(opr2, result, type, lir_patch_none, NULL, +++ false, // pop_fpu_stack +++ false, // wide +++ false); // unaligned +++ __ j(done); +++ __ bind(label); +++ move_op(opr1, result, type, lir_patch_none, NULL, +++ false, // pop_fpu_stack +++ false, // wide +++ false); // unaligned +++ __ bind(done); +++} +++ +++void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { +++ LIR_Condition condition = op->cond(); +++ if (condition == lir_cond_always) { +++ if (op->info() != NULL) { +++ add_debug_info_for_branch(op->info()); +++ } +++ } else { +++ assert(op->in_opr1() != LIR_OprFact::illegalOpr && op->in_opr2() != LIR_OprFact::illegalOpr, "conditional branches must have legal operands"); +++ } +++ bool is_unordered = (op->ublock() == op->block()); +++ emit_branch(condition, op->in_opr1(), op->in_opr2(), *op->label(), /* is_far */ true, is_unordered); +++} +++ +++void LIR_Assembler::emit_branch(LIR_Condition cmp_flag, LIR_Opr cmp1, LIR_Opr cmp2, Label& label, +++ bool is_far, bool is_unordered) { +++ +++ if (cmp_flag == lir_cond_always) { +++ __ j(label); +++ return; +++ } +++ +++ if (cmp1->is_cpu_register()) { +++ Register reg1 = as_reg(cmp1); +++ if (cmp2->is_cpu_register()) { +++ Register reg2 = as_reg(cmp2); +++ __ c1_cmp_branch(cmp_flag, reg1, reg2, label, cmp1->type(), is_far); +++ } else if (cmp2->is_constant()) { +++ const2reg_helper(cmp2); +++ __ c1_cmp_branch(cmp_flag, reg1, t0, label, cmp2->type(), is_far); +++ } else { +++ ShouldNotReachHere(); +++ } +++ } else if (cmp1->is_single_fpu()) { +++ assert(cmp2->is_single_fpu(), "expect single float register"); +++ __ c1_float_cmp_branch(cmp_flag, cmp1->as_float_reg(), cmp2->as_float_reg(), label, is_far, is_unordered); +++ } else if (cmp1->is_double_fpu()) { +++ assert(cmp2->is_double_fpu(), "expect double float register"); +++ __ c1_float_cmp_branch(cmp_flag | C1_MacroAssembler::c1_double_branch_mask, +++ cmp1->as_double_reg(), cmp2->as_double_reg(), label, is_far, is_unordered); +++ } else { +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { +++ LIR_Opr src = op->in_opr(); +++ LIR_Opr dest = op->result_opr(); +++ +++ switch (op->bytecode()) { +++ case Bytecodes::_i2f: +++ __ fcvt_s_w(dest->as_float_reg(), src->as_register()); break; +++ case Bytecodes::_i2d: +++ __ fcvt_d_w(dest->as_double_reg(), src->as_register()); break; +++ case Bytecodes::_l2d: +++ __ fcvt_d_l(dest->as_double_reg(), src->as_register_lo()); break; +++ case Bytecodes::_l2f: +++ __ fcvt_s_l(dest->as_float_reg(), src->as_register_lo()); break; +++ case Bytecodes::_f2d: +++ __ fcvt_d_s(dest->as_double_reg(), src->as_float_reg()); break; +++ case Bytecodes::_d2f: +++ __ fcvt_s_d(dest->as_float_reg(), src->as_double_reg()); break; +++ case Bytecodes::_i2c: +++ __ zero_extend(dest->as_register(), src->as_register(), 16); break; +++ case Bytecodes::_i2l: +++ __ sign_extend(dest->as_register_lo(), src->as_register(), 32); break; +++ case Bytecodes::_i2s: +++ __ sign_extend(dest->as_register(), src->as_register(), 16); break; +++ case Bytecodes::_i2b: +++ __ sign_extend(dest->as_register(), src->as_register(), 8); break; +++ case Bytecodes::_l2i: +++ __ sign_extend(dest->as_register(), src->as_register_lo(), 32); break; +++ case Bytecodes::_d2l: +++ __ fcvt_l_d_safe(dest->as_register_lo(), src->as_double_reg()); break; +++ case Bytecodes::_f2i: +++ __ fcvt_w_s_safe(dest->as_register(), src->as_float_reg()); break; +++ case Bytecodes::_f2l: +++ __ fcvt_l_s_safe(dest->as_register_lo(), 
src->as_float_reg()); break; +++ case Bytecodes::_d2i: +++ __ fcvt_w_d_safe(dest->as_register(), src->as_double_reg()); break; +++ default: +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { +++ if (op->init_check()) { +++ __ lbu(t0, Address(op->klass()->as_register(), +++ InstanceKlass::init_state_offset())); +++ __ mv(t1, (u1)InstanceKlass::fully_initialized); +++ add_debug_info_for_null_check_here(op->stub()->info()); +++ __ bne(t0, t1, *op->stub()->entry(), /* is_far */ true); +++ } +++ +++ __ allocate_object(op->obj()->as_register(), +++ op->tmp1()->as_register(), +++ op->tmp2()->as_register(), +++ op->header_size(), +++ op->object_size(), +++ op->klass()->as_register(), +++ *op->stub()->entry()); +++ +++ __ bind(*op->stub()->continuation()); +++} +++ +++void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { +++ Register len = op->len()->as_register(); +++ +++ if (UseSlowPath || +++ (!UseFastNewObjectArray && is_reference_type(op->type())) || +++ (!UseFastNewTypeArray && !is_reference_type(op->type()))) { +++ __ j(*op->stub()->entry()); +++ } else { +++ Register tmp1 = op->tmp1()->as_register(); +++ Register tmp2 = op->tmp2()->as_register(); +++ Register tmp3 = op->tmp3()->as_register(); +++ if (len == tmp1) { +++ tmp1 = tmp3; +++ } else if (len == tmp2) { +++ tmp2 = tmp3; +++ } else if (len == tmp3) { +++ // everything is ok +++ } else { +++ __ mv(tmp3, len); +++ } +++ __ allocate_array(op->obj()->as_register(), +++ len, +++ tmp1, +++ tmp2, +++ arrayOopDesc::header_size(op->type()), +++ array_element_size(op->type()), +++ op->klass()->as_register(), +++ *op->stub()->entry()); +++ } +++ __ bind(*op->stub()->continuation()); +++} +++ +++void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data, +++ Register recv, Label* update_done) { +++ for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) { +++ Label next_test; +++ // See if the receiver is receiver[n]. 
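+++ // Each ReceiverTypeData row pairs a receiver klass with a hit count; +++ // on a match, only that row's counter needs to be bumped before jumping to update_done.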
+++ __ ld(t1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)))); +++ __ bne(recv, t1, next_test); +++ Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))); +++ __ increment(data_addr, DataLayout::counter_increment); +++ __ j(*update_done); +++ __ bind(next_test); +++ } +++ +++ // Didn't find receiver; find next empty slot and fill it in +++ for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) { +++ Label next_test; +++ Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))); +++ __ ld(t1, recv_addr); +++ __ bnez(t1, next_test); +++ __ sd(recv, recv_addr); +++ __ mv(t1, DataLayout::counter_increment); +++ __ sd(t1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)))); +++ __ j(*update_done); +++ __ bind(next_test); +++ } +++} +++ +++void LIR_Assembler::data_check(LIR_OpTypeCheck *op, ciMethodData **md, ciProfileData **data) { +++ ciMethod* method = op->profiled_method(); +++ assert(method != NULL, "Should have method"); +++ int bci = op->profiled_bci(); +++ *md = method->method_data_or_null(); +++ guarantee(*md != NULL, "Sanity"); +++ *data = ((*md)->bci_to_data(bci)); +++ assert(*data != NULL, "need data for type check"); +++ assert((*data)->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); +++} +++ +++void LIR_Assembler::typecheck_helper_slowcheck(ciKlass *k, Register obj, Register Rtmp1, +++ Register k_RInfo, Register klass_RInfo, +++ Label *failure_target, Label *success_target) { +++ // get object class +++ // not a safepoint as obj null check happens earlier +++ __ load_klass(klass_RInfo, obj); +++ if (k->is_loaded()) { +++ // See if we get an immediate positive hit +++ __ ld(t0, Address(klass_RInfo, int64_t(k->super_check_offset()))); +++ if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) { +++ __ bne(k_RInfo, t0, *failure_target, /* is_far */ true); +++ // successful cast, fall through to profile or jump +++ } else { +++ // See if we get an immediate positive hit +++ __ beq(k_RInfo, t0, *success_target); +++ // check for self +++ __ beq(klass_RInfo, k_RInfo, *success_target); +++ +++ __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo +++ __ sd(k_RInfo, Address(sp, 0)); // super klass +++ __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass +++ __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); +++ // load result to k_RInfo +++ __ ld(k_RInfo, Address(sp, 0)); +++ __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo +++ // result is a boolean +++ __ beqz(k_RInfo, *failure_target, /* is_far */ true); +++ // successful cast, fall through to profile or jump +++ } +++ } else { +++ // perform the fast part of the checking logic +++ __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); +++ // call out-of-line instance of __ check_klass_subtype_slow_path(...)
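+++ // The stub takes both klasses on the stack: the sub klass at Address(sp, wordSize) +++ // and the super klass at Address(sp, 0); its boolean result comes back in the slot at Address(sp, 0).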
+++ __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo +++ __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass +++ __ sd(k_RInfo, Address(sp, 0)); // super klass +++ __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); +++ // load result to k_RInfo +++ __ ld(k_RInfo, Address(sp, 0)); +++ __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo +++ // result is a boolean +++ __ beqz(k_RInfo, *failure_target, /* is_far */ true); +++ // successful cast, fall through to profile or jump +++ } +++} +++ +++void LIR_Assembler::profile_object(ciMethodData* md, ciProfileData* data, Register obj, +++ Register klass_RInfo, Label* obj_is_null) { +++ Label not_null; +++ __ bnez(obj, not_null); +++ // Object is null, update MDO and exit +++ Register mdo = klass_RInfo; +++ __ mov_metadata(mdo, md->constant_encoding()); +++ Address data_addr = __ form_address(t1, mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset())); +++ __ lbu(t0, data_addr); +++ __ ori(t0, t0, BitData::null_seen_byte_constant()); +++ __ sb(t0, data_addr); +++ __ j(*obj_is_null); +++ __ bind(not_null); +++} +++ +++void LIR_Assembler::typecheck_loaded(LIR_OpTypeCheck *op, ciKlass* k, Register k_RInfo) { +++ if (!k->is_loaded()) { +++ klass2reg_with_patching(k_RInfo, op->info_for_patch()); +++ } else { +++ __ mov_metadata(k_RInfo, k->constant_encoding()); +++ } +++} +++ +++void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { +++ Register obj = op->object()->as_register(); +++ Register k_RInfo = op->tmp1()->as_register(); +++ Register klass_RInfo = op->tmp2()->as_register(); +++ Register dst = op->result_opr()->as_register(); +++ ciKlass* k = op->klass(); +++ Register Rtmp1 = noreg; +++ +++ // check if it needs to be profiled +++ ciMethodData* md = NULL; +++ ciProfileData* data = NULL; +++ +++ const bool should_profile = op->should_profile(); +++ if (should_profile) { +++ data_check(op, &md, &data); +++ } +++ Label profile_cast_success, profile_cast_failure; +++ Label *success_target = should_profile ? &profile_cast_success : success; +++ Label *failure_target = should_profile ?
&profile_cast_failure : failure; +++ +++ if (obj == k_RInfo) { +++ k_RInfo = dst; +++ } else if (obj == klass_RInfo) { +++ klass_RInfo = dst; +++ } +++ if (k->is_loaded() && !UseCompressedClassPointers) { +++ select_different_registers(obj, dst, k_RInfo, klass_RInfo); +++ } else { +++ Rtmp1 = op->tmp3()->as_register(); +++ select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); +++ } +++ +++ assert_different_registers(obj, k_RInfo, klass_RInfo); +++ +++ if (should_profile) { +++ profile_object(md, data, obj, klass_RInfo, obj_is_null); +++ } else { +++ __ beqz(obj, *obj_is_null); +++ } +++ +++ typecheck_loaded(op, k, k_RInfo); +++ __ verify_oop(obj); +++ +++ if (op->fast_check()) { +++ // get object class +++ // not a safepoint as obj null check happens earlier +++ __ load_klass(t0, obj, t1); +++ __ bne(t0, k_RInfo, *failure_target, /* is_far */ true); +++ // successful cast, fall through to profile or jump +++ } else { +++ typecheck_helper_slowcheck(k, obj, Rtmp1, k_RInfo, klass_RInfo, failure_target, success_target); +++ } +++ if (should_profile) { +++ type_profile(obj, md, klass_RInfo, k_RInfo, data, success, failure, profile_cast_success, profile_cast_failure); +++ } +++ __ j(*success); +++} +++ +++void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { +++ const bool should_profile = op->should_profile(); +++ +++ LIR_Code code = op->code(); +++ if (code == lir_store_check) { +++ typecheck_lir_store(op, should_profile); +++ } else if (code == lir_checkcast) { +++ Register obj = op->object()->as_register(); +++ Register dst = op->result_opr()->as_register(); +++ Label success; +++ emit_typecheck_helper(op, &success, op->stub()->entry(), &success); +++ __ bind(success); +++ if (dst != obj) { +++ __ mv(dst, obj); +++ } +++ } else if (code == lir_instanceof) { +++ Register obj = op->object()->as_register(); +++ Register dst = op->result_opr()->as_register(); +++ Label success, failure, done; +++ emit_typecheck_helper(op, &success, &failure, &failure); +++ __ bind(failure); +++ __ mv(dst, zr); +++ __ j(done); +++ __ bind(success); +++ __ mv(dst, 1); +++ __ bind(done); +++ } else { +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { +++ assert(VM_Version::supports_cx8(), "wrong machine"); +++ Register addr; +++ if (op->addr()->is_register()) { +++ addr = as_reg(op->addr()); +++ } else { +++ assert(op->addr()->is_address(), "what else?"); +++ LIR_Address* addr_ptr = op->addr()->as_address_ptr(); +++ assert(addr_ptr->disp() == 0, "need 0 disp"); +++ assert(addr_ptr->index() == LIR_OprDesc::illegalOpr(), "need 0 index"); +++ addr = as_reg(addr_ptr->base()); +++ } +++ Register newval = as_reg(op->new_value()); +++ Register cmpval = as_reg(op->cmp_value()); +++ +++ if (op->code() == lir_cas_obj) { +++ if (UseCompressedOops) { +++ Register tmp1 = op->tmp1()->as_register(); +++ assert(op->tmp1()->is_valid(), "must be"); +++ __ encode_heap_oop(tmp1, cmpval); +++ cmpval = tmp1; +++ __ encode_heap_oop(t1, newval); +++ newval = t1; +++ caswu(addr, newval, cmpval); +++ } else { +++ casl(addr, newval, cmpval); +++ } +++ } else if (op->code() == lir_cas_int) { +++ casw(addr, newval, cmpval); +++ } else { +++ casl(addr, newval, cmpval); +++ } +++} +++ +++void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) { +++ switch (code) { +++ case lir_abs: __ fabs_d(dest->as_double_reg(), value->as_double_reg()); break; +++ case lir_sqrt: __ fsqrt_d(dest->as_double_reg(), 
value->as_double_reg()); break; +++ default: ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { +++ assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register"); +++ Register Rleft = left->is_single_cpu() ? left->as_register() : left->as_register_lo(); +++ if (dst->is_single_cpu()) { +++ Register Rdst = dst->as_register(); +++ if (right->is_constant()) { +++ int right_const = right->as_jint(); +++ if (Assembler::is_simm12(right_const)) { +++ logic_op_imm(Rdst, Rleft, right_const, code); +++ __ sign_extend(Rdst, Rdst, 32); +++ } else { +++ __ mv(t0, right_const); +++ logic_op_reg32(Rdst, Rleft, t0, code); +++ } +++ } else { +++ Register Rright = right->is_single_cpu() ? right->as_register() : right->as_register_lo(); +++ logic_op_reg32(Rdst, Rleft, Rright, code); +++ } +++ } else { +++ Register Rdst = dst->as_register_lo(); +++ if (right->is_constant()) { +++ long right_const = right->as_jlong(); +++ if (Assembler::is_simm12(right_const)) { +++ logic_op_imm(Rdst, Rleft, right_const, code); +++ } else { +++ __ mv(t0, right_const); +++ logic_op_reg(Rdst, Rleft, t0, code); +++ } +++ } else { +++ Register Rright = right->is_single_cpu() ? right->as_register() : right->as_register_lo(); +++ logic_op_reg(Rdst, Rleft, Rright, code); +++ } +++ } +++} +++ +++void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op) { +++ ShouldNotCallThis(); +++} +++ +++void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) { +++ if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { +++ bool is_unordered_less = (code == lir_ucmp_fd2i); +++ if (left->is_single_fpu()) { +++ __ float_cmp(true, is_unordered_less ? -1 : 1, +++ left->as_float_reg(), right->as_float_reg(), dst->as_register()); +++ } else if (left->is_double_fpu()) { +++ __ float_cmp(false, is_unordered_less ? -1 : 1, +++ left->as_double_reg(), right->as_double_reg(), dst->as_register()); +++ } else { +++ ShouldNotReachHere(); +++ } +++ } else if (code == lir_cmp_l2i) { +++ __ cmp_l2i(dst->as_register(), left->as_register_lo(), right->as_register_lo()); +++ } else { +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::align_call(LIR_Code code) { +++ // With RVC a call instruction may get 2-byte aligned. +++ // The address of the call instruction needs to be 4-byte aligned to +++ // ensure that it does not span a cache line so that it can be patched. 
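+++ // NativeInstruction::instruction_size is the 4-byte base instruction width, +++ // so this restores 4-byte alignment when RVC has left the pc at a 2-byte boundary.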
+++ __ align(NativeInstruction::instruction_size); +++} +++ +++void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { +++ address call = __ trampoline_call(Address(op->addr(), rtype)); +++ if (call == NULL) { +++ bailout("trampoline stub overflow"); +++ return; +++ } +++ add_call_info(code_offset(), op->info()); +++} +++ +++void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { +++ address call = __ ic_call(op->addr()); +++ if (call == NULL) { +++ bailout("trampoline stub overflow"); +++ return; +++ } +++ add_call_info(code_offset(), op->info()); +++} +++ +++void LIR_Assembler::emit_static_call_stub() { +++ address call_pc = __ pc(); +++ MacroAssembler::assert_alignment(call_pc); +++ address stub = __ start_a_stub(call_stub_size()); +++ if (stub == NULL) { +++ bailout("static call stub overflow"); +++ return; +++ } +++ +++ int start = __ offset(); +++ +++ __ relocate(static_stub_Relocation::spec(call_pc)); +++ __ emit_static_call_stub(); +++ +++ assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size() +++ <= call_stub_size(), "stub too big"); +++ __ end_a_stub(); +++} +++ +++void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { +++ assert(exceptionOop->as_register() == x10, "must match"); +++ assert(exceptionPC->as_register() == x13, "must match"); +++ +++ // exception object is not added to oop map by LinearScan +++ // (LinearScan assumes that no oops are in fixed registers) +++ info->add_register_oop(exceptionOop); +++ Runtime1::StubID unwind_id; +++ +++ // get current pc information +++ // pc is only needed if the method has an exception handler; the unwind code does not need it. +++ if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) { +++ // As no instructions have been generated yet for this LIR node it's +++ // possible that an oop map already exists for the current offset. +++ // In that case insert a dummy NOP here to ensure all oop map PCs +++ // are unique. See JDK-8237483. +++ __ nop(); +++ } +++ int pc_for_athrow_offset = __ offset(); +++ InternalAddress pc_for_athrow(__ pc()); +++ __ relocate(pc_for_athrow.rspec(), [&] { +++ int32_t offset; +++ __ la_patchable(exceptionPC->as_register(), pc_for_athrow, offset); +++ __ addi(exceptionPC->as_register(), exceptionPC->as_register(), offset); +++ }); +++ add_call_info(pc_for_athrow_offset, info); // for exception handler +++ +++ __ verify_not_null_oop(x10); +++ // search an exception handler (x10: exception oop, x13: throwing pc) +++ if (compilation()->has_fpu_code()) { +++ unwind_id = Runtime1::handle_exception_id; +++ } else { +++ unwind_id = Runtime1::handle_exception_nofpu_id; +++ } +++ __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id))); +++ __ nop(); +++} +++ +++void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { +++ assert(exceptionOop->as_register() == x10, "must match"); +++ __ j(_unwind_handler_entry); +++} +++ +++void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { +++ Register left_reg = left->is_single_cpu() ? left->as_register() : left->as_register_lo(); +++ Register dest_reg = dest->is_single_cpu() ?
dest->as_register() : dest->as_register_lo(); +++ Register count_reg = count->as_register(); +++ if (dest->is_single_cpu()) { +++ assert (dest->type() == T_INT, "unexpected result type"); +++ assert (left->type() == T_INT, "unexpected left type"); +++ __ andi(t0, count_reg, 31); // should not shift more than 31 bits +++ switch (code) { +++ case lir_shl: __ sllw(dest_reg, left_reg, t0); break; +++ case lir_shr: __ sraw(dest_reg, left_reg, t0); break; +++ case lir_ushr: __ srlw(dest_reg, left_reg, t0); break; +++ default: ShouldNotReachHere(); +++ } +++ } else if (dest->is_double_cpu()) { +++ __ andi(t0, count_reg, 63); // should not shift more than 63 bits +++ switch (code) { +++ case lir_shl: __ sll(dest_reg, left_reg, t0); break; +++ case lir_shr: __ sra(dest_reg, left_reg, t0); break; +++ case lir_ushr: __ srl(dest_reg, left_reg, t0); break; +++ default: ShouldNotReachHere(); +++ } +++ } else { +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { +++ Register left_reg = left->is_single_cpu() ? left->as_register() : left->as_register_lo(); +++ Register dest_reg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo(); +++ if (dest->is_single_cpu()) { +++ assert (dest->type() == T_INT, "unexpected result type"); +++ assert (left->type() == T_INT, "unexpected left type"); +++ count &= 0x1f; +++ if (count != 0) { +++ switch (code) { +++ case lir_shl: __ slliw(dest_reg, left_reg, count); break; +++ case lir_shr: __ sraiw(dest_reg, left_reg, count); break; +++ case lir_ushr: __ srliw(dest_reg, left_reg, count); break; +++ default: ShouldNotReachHere(); +++ } +++ } else { +++ move_regs(left_reg, dest_reg); +++ } +++ } else if (dest->is_double_cpu()) { +++ count &= 0x3f; +++ if (count != 0) { +++ switch (code) { +++ case lir_shl: __ slli(dest_reg, left_reg, count); break; +++ case lir_shr: __ srai(dest_reg, left_reg, count); break; +++ case lir_ushr: __ srli(dest_reg, left_reg, count); break; +++ default: ShouldNotReachHere(); +++ } +++ } else { +++ move_regs(left->as_register_lo(), dest->as_register_lo()); +++ } +++ } else { +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::emit_lock(LIR_OpLock* op) { +++ Register obj = op->obj_opr()->as_register(); // may not be an oop +++ Register hdr = op->hdr_opr()->as_register(); +++ Register lock = op->lock_opr()->as_register(); +++ if (!UseFastLocking) { +++ __ j(*op->stub()->entry()); +++ } else if (op->code() == lir_lock) { +++ Register tmp = noreg; +++ if (UseBiasedLocking) { +++ tmp = op->scratch_opr()->as_register(); +++ } +++ assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); +++ // add debug info for NullPointerException only if one is possible +++ int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry()); +++ if (op->info() != NULL) { +++ add_debug_info_for_null_check(null_check_offset, op->info()); +++ } +++ } else if (op->code() == lir_unlock) { +++ assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); +++ __ unlock_object(hdr, obj, lock, *op->stub()->entry()); +++ } else { +++ Unimplemented(); +++ } +++ __ bind(*op->stub()->continuation()); +++} +++ +++void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { +++ ciMethod* method = op->profiled_method(); +++ int bci = op->profiled_bci(); +++ +++ // Update counter for all call types +++ ciMethodData* md = method->method_data_or_null(); +++ guarantee(md != 
NULL, "Sanity"); +++ ciProfileData* data = md->bci_to_data(bci); +++ assert(data != NULL && data->is_CounterData(), "need CounterData for calls"); +++ assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); +++ Register mdo = op->mdo()->as_register(); +++ __ mov_metadata(mdo, md->constant_encoding()); +++ Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); +++ // Perform additional virtual call profiling for invokevirtual and +++ // invokeinterface bytecodes +++ if (op->should_profile_receiver_type()) { +++ assert(op->recv()->is_single_cpu(), "recv must be allocated"); +++ Register recv = op->recv()->as_register(); +++ assert_different_registers(mdo, recv); +++ assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); +++ ciKlass* known_klass = op->known_holder(); +++ if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { +++ // We know the type that will be seen at this call site; we can +++ // statically update the MethodData* rather than needing to do +++ // dynamic tests on the receiver type +++ // NOTE: we should probably put a lock around this search to +++ // avoid collisions by concurrent compilations +++ ciVirtualCallData* vc_data = (ciVirtualCallData*) data; +++ uint i; +++ for (i = 0; i < VirtualCallData::row_limit(); i++) { +++ ciKlass* receiver = vc_data->receiver(i); +++ if (known_klass->equals(receiver)) { +++ Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); +++ __ increment(data_addr, DataLayout::counter_increment); +++ return; +++ } +++ } +++ +++ // Receiver type not found in profile data; select an empty slot +++ // Note that this is less efficient than it should be because it +++ // always does a write to the receiver part of the +++ // VirtualCallData rather than just the first time +++ for (i = 0; i < VirtualCallData::row_limit(); i++) { +++ ciKlass* receiver = vc_data->receiver(i); +++ if (receiver == NULL) { +++ Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); +++ __ mov_metadata(t1, known_klass->constant_encoding()); +++ __ sd(t1, recv_addr); +++ Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); +++ __ increment(data_addr, DataLayout::counter_increment); +++ return; +++ } +++ } +++ } else { +++ __ load_klass(recv, recv); +++ Label update_done; +++ type_profile_helper(mdo, md, data, recv, &update_done); +++ // Receiver did not match any saved receiver and there is no empty row for it. +++ // Increment total counter to indicate polymorphic case. 
+++ __ increment(counter_addr, DataLayout::counter_increment); +++ +++ __ bind(update_done); +++ } +++ } else { +++ // Static call +++ __ increment(counter_addr, DataLayout::counter_increment); +++ } +++} +++ +++void LIR_Assembler::emit_delay(LIR_OpDelay*) { Unimplemented(); } +++ +++void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { +++ __ la(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no)); +++} +++ +++void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { Unimplemented(); } +++ +++void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass, +++ Register tmp, Label &next, Label &none, +++ Address mdo_addr) { +++ if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) { +++ if (exact_klass != NULL) { +++ __ mov_metadata(tmp, exact_klass->constant_encoding()); +++ } else { +++ __ load_klass(tmp, tmp); +++ } +++ +++ __ ld(t1, mdo_addr); +++ __ xorr(tmp, tmp, t1); +++ __ andi(t0, tmp, TypeEntries::type_klass_mask); +++ // klass seen before, nothing to do. The unknown bit may have been +++ // set already but no need to check. +++ __ beqz(t0, next); +++ +++ // already unknown. Nothing to do anymore. +++ __ test_bit(t0, tmp, exact_log2(TypeEntries::type_unknown)); +++ __ bnez(t0, next); +++ +++ if (TypeEntries::is_type_none(current_klass)) { +++ __ beqz(t1, none); +++ __ mv(t0, (u1)TypeEntries::null_seen); +++ __ beq(t0, t1, none); +++ // There is a chance that the checks above (re-reading profiling +++ // data from memory) fail if another thread has just set the +++ // profiling to this obj's klass +++ __ membar(MacroAssembler::LoadLoad); +++ __ ld(t1, mdo_addr); +++ __ xorr(tmp, tmp, t1); +++ __ andi(t0, tmp, TypeEntries::type_klass_mask); +++ __ beqz(t0, next); +++ } +++ } else { +++ assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && +++ ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); +++ +++ __ ld(tmp, mdo_addr); +++ // already unknown. Nothing to do anymore. +++ __ test_bit(t0, tmp, exact_log2(TypeEntries::type_unknown)); +++ __ bnez(t0, next); +++ } +++ +++ // different than before. Cannot keep accurate profile. +++ __ ld(t1, mdo_addr); +++ __ ori(t1, t1, TypeEntries::type_unknown); +++ __ sd(t1, mdo_addr); +++ +++ if (TypeEntries::is_type_none(current_klass)) { +++ __ j(next); +++ +++ __ bind(none); +++ // first time here. Set profile type. +++ __ sd(tmp, mdo_addr); +++ } +++} +++ +++void LIR_Assembler::check_no_conflict(ciKlass* exact_klass, intptr_t current_klass, Register tmp, +++ Address mdo_addr, Label &next) { +++ // There's a single possible klass at this profile point +++ assert(exact_klass != NULL, "should be"); +++ if (TypeEntries::is_type_none(current_klass)) { +++ __ mov_metadata(tmp, exact_klass->constant_encoding()); +++ __ ld(t1, mdo_addr); +++ __ xorr(tmp, tmp, t1); +++ __ andi(t0, tmp, TypeEntries::type_klass_mask); +++ __ beqz(t0, next); +++#ifdef ASSERT +++ { +++ Label ok; +++ __ ld(t0, mdo_addr); +++ __ beqz(t0, ok); +++ __ mv(t1, (u1)TypeEntries::null_seen); +++ __ beq(t0, t1, ok); +++ // may have been set by another thread +++ __ membar(MacroAssembler::LoadLoad); +++ __ mov_metadata(t0, exact_klass->constant_encoding()); +++ __ ld(t1, mdo_addr); +++ __ xorr(t1, t0, t1); +++ __ andi(t1, t1, TypeEntries::type_mask); +++ __ beqz(t1, ok); +++ +++ __ stop("unexpected profiling mismatch"); +++ __ bind(ok); +++ } +++#endif +++ // first time here. Set profile type. 
+++ __ sd(tmp, mdo_addr); +++ } else { +++ assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && +++ ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); +++ +++ __ ld(tmp, mdo_addr); +++ // already unknown. Nothing to do anymore. +++ __ test_bit(t0, tmp, exact_log2(TypeEntries::type_unknown)); +++ __ bnez(t0, next); +++ +++ __ ori(tmp, tmp, TypeEntries::type_unknown); +++ __ sd(tmp, mdo_addr); +++ } +++} +++ +++void LIR_Assembler::check_null(Register tmp, Label &update, intptr_t current_klass, +++ Address mdo_addr, bool do_update, Label &next) { +++ __ bnez(tmp, update); +++ if (!TypeEntries::was_null_seen(current_klass)) { +++ __ ld(t1, mdo_addr); +++ __ ori(t1, t1, TypeEntries::null_seen); +++ __ sd(t1, mdo_addr); +++ } +++ if (do_update) { +++ __ j(next); +++ } +++} +++ +++void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { +++ COMMENT("emit_profile_type {"); +++ Register obj = op->obj()->as_register(); +++ Register tmp = op->tmp()->as_pointer_register(); +++ Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); +++ ciKlass* exact_klass = op->exact_klass(); +++ intptr_t current_klass = op->current_klass(); +++ bool not_null = op->not_null(); +++ bool no_conflict = op->no_conflict(); +++ +++ Label update, next, none; +++ +++ bool do_null = !not_null; +++ bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; +++ bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; +++ +++ assert(do_null || do_update, "why are we here?"); +++ assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); +++ assert_different_registers(tmp, t0, t1, mdo_addr.base()); +++ +++ __ verify_oop(obj); +++ +++ if (tmp != obj) { +++ __ mv(tmp, obj); +++ } +++ if (do_null) { +++ check_null(tmp, update, current_klass, mdo_addr, do_update, next); +++#ifdef ASSERT +++ } else { +++ __ bnez(tmp, update); +++ __ stop("unexpected null obj"); +++#endif +++ } +++ +++ __ bind(update); +++ +++ if (do_update) { +++#ifdef ASSERT +++ if (exact_klass != NULL) { +++ check_exact_klass(tmp, exact_klass); +++ } +++#endif +++ if (!no_conflict) { +++ check_conflict(exact_klass, current_klass, tmp, next, none, mdo_addr); +++ } else { +++ check_no_conflict(exact_klass, current_klass, tmp, mdo_addr, next); +++ } +++ +++ __ bind(next); +++ } +++ COMMENT("} emit_profile_type"); +++} +++ +++void LIR_Assembler::align_backward_branch_target() { } +++ +++void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { +++ // tmp must be unused +++ assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); +++ +++ if (left->is_single_cpu()) { +++ assert(dest->is_single_cpu(), "expect single result reg"); +++ __ negw(dest->as_register(), left->as_register()); +++ } else if (left->is_double_cpu()) { +++ assert(dest->is_double_cpu(), "expect double result reg"); +++ __ neg(dest->as_register_lo(), left->as_register_lo()); +++ } else if (left->is_single_fpu()) { +++ assert(dest->is_single_fpu(), "expect single float result reg"); +++ __ fneg_s(dest->as_float_reg(), left->as_float_reg()); +++ } else { +++ assert(left->is_double_fpu(), "expect double float operand reg"); +++ assert(dest->is_double_fpu(), "expect double float result reg"); +++ __ fneg_d(dest->as_double_reg(), left->as_double_reg()); +++ } +++} +++ +++ +++void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { +++ if (patch_code != lir_patch_none) { +++ deoptimize_trap(info); +++ 
return; +++ } +++ +++ LIR_Address* adr = addr->as_address_ptr(); +++ Register dst = dest->as_register_lo(); +++ +++ assert_different_registers(dst, t0); +++ if (adr->base()->is_valid() && dst == adr->base()->as_pointer_register() && (!adr->index()->is_cpu_register())) { +++ int scale = adr->scale(); +++ intptr_t offset = adr->disp(); +++ LIR_Opr index_op = adr->index(); +++ if (index_op->is_constant()) { +++ offset += ((intptr_t)index_op->as_constant_ptr()->as_jint()) << scale; +++ } +++ +++ if (!Assembler::is_simm12(offset)) { +++ __ la(t0, as_Address(adr)); +++ __ mv(dst, t0); +++ return; +++ } +++ } +++ +++ __ la(dst, as_Address(adr)); +++} +++ +++ +++void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { +++ assert(!tmp->is_valid(), "don't need temporary"); +++ +++ CodeBlob *cb = CodeCache::find_blob(dest); +++ if (cb != NULL) { +++ __ far_call(RuntimeAddress(dest)); +++ } else { +++ RuntimeAddress target(dest); +++ __ relocate(target.rspec(), [&] { +++ int32_t offset; +++ __ la_patchable(t0, target, offset); +++ __ jalr(x1, t0, offset); +++ }); +++ } +++ +++ if (info != NULL) { +++ add_call_info_here(info); +++ } +++} +++ +++void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { +++ if (dest->is_address() || src->is_address()) { +++ move_op(src, dest, type, lir_patch_none, info, /* pop_fpu_stack */ false, /* wide */ false, /* unaligned */ false); +++ } else { +++ ShouldNotReachHere(); +++ } +++} +++ +++#ifdef ASSERT +++// emit run-time assertion +++void LIR_Assembler::emit_assert(LIR_OpAssert* op) { +++ assert(op->code() == lir_assert, "must be"); +++ +++ Label ok; +++ if (op->in_opr1()->is_valid()) { +++ assert(op->in_opr2()->is_valid(), "both operands must be valid"); +++ bool is_unordered = false; +++ LIR_Condition cond = op->condition(); +++ emit_branch(cond, op->in_opr1(), op->in_opr2(), ok, /* is_far */ false, +++ /* is_unordered */(cond == lir_cond_greaterEqual || cond == lir_cond_greater) ? 
false : true); +++ } else { +++ assert(op->in_opr2()->is_illegal(), "both operands must be illegal"); +++ assert(op->condition() == lir_cond_always, "no other conditions allowed"); +++ } +++ +++ if (op->halt()) { +++ const char* str = __ code_string(op->msg()); +++ __ stop(str); +++ } else { +++ breakpoint(); +++ } +++ __ bind(ok); +++} +++#endif +++ +++#ifndef PRODUCT +++#define COMMENT(x) do { __ block_comment(x); } while (0) +++#else +++#define COMMENT(x) +++#endif +++ +++void LIR_Assembler::membar() { +++ COMMENT("membar"); +++ __ membar(MacroAssembler::AnyAny); +++} +++ +++void LIR_Assembler::membar_acquire() { +++ __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); +++} +++ +++void LIR_Assembler::membar_release() { +++ __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); +++} +++ +++void LIR_Assembler::membar_loadload() { +++ __ membar(MacroAssembler::LoadLoad); +++} +++ +++void LIR_Assembler::membar_storestore() { +++ __ membar(MacroAssembler::StoreStore); +++} +++ +++void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); } +++ +++void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); } +++ +++void LIR_Assembler::on_spin_wait() { +++ Unimplemented(); +++} +++ +++void LIR_Assembler::get_thread(LIR_Opr result_reg) { +++ __ mv(result_reg->as_register(), xthread); +++} +++ +++void LIR_Assembler::peephole(LIR_List *lir) {} +++ +++void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) { +++ Address addr = as_Address(src->as_address_ptr()); +++ BasicType type = src->type(); +++ bool is_oop = is_reference_type(type); +++ +++ get_op(type); +++ +++ switch (code) { +++ case lir_xadd: +++ { +++ RegisterOrConstant inc; +++ Register tmp = as_reg(tmp_op); +++ Register dst = as_reg(dest); +++ if (data->is_constant()) { +++ inc = RegisterOrConstant(as_long(data)); +++ assert_different_registers(dst, addr.base(), tmp); +++ assert_different_registers(tmp, t0); +++ } else { +++ inc = RegisterOrConstant(as_reg(data)); +++ assert_different_registers(inc.as_register(), dst, addr.base(), tmp); +++ } +++ __ la(tmp, addr); +++ (_masm->*add)(dst, inc, tmp); +++ break; +++ } +++ case lir_xchg: +++ { +++ Register tmp = tmp_op->as_register(); +++ Register obj = as_reg(data); +++ Register dst = as_reg(dest); +++ if (is_oop && UseCompressedOops) { +++ __ encode_heap_oop(t0, obj); +++ obj = t0; +++ } +++ assert_different_registers(obj, addr.base(), tmp, dst); +++ __ la(tmp, addr); +++ (_masm->*xchg)(dst, obj, tmp); +++ if (is_oop && UseCompressedOops) { +++ __ decode_heap_oop(dst); +++ } +++ } +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ __ membar(MacroAssembler::AnyAny); +++} +++ +++int LIR_Assembler::array_element_size(BasicType type) const { +++ int elem_size = type2aelembytes(type); +++ return exact_log2(elem_size); +++} +++ +++// helper functions which checks for overflow and sets bailout if it +++// occurs. Always returns a valid embeddable pointer but in the +++// bailout case the pointer won't be to unique storage. 
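+++ // Returning consts()->start() on overflow keeps the address embeddable +++ // while the bailout abandons the compilation.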
+++address LIR_Assembler::float_constant(float f) { +++ address const_addr = __ float_constant(f); +++ if (const_addr == NULL) { +++ bailout("const section overflow"); +++ return __ code()->consts()->start(); +++ } else { +++ return const_addr; +++ } +++} +++ +++address LIR_Assembler::double_constant(double d) { +++ address const_addr = __ double_constant(d); +++ if (const_addr == NULL) { +++ bailout("const section overflow"); +++ return __ code()->consts()->start(); +++ } else { +++ return const_addr; +++ } +++} +++ +++address LIR_Assembler::int_constant(jlong n) { +++ address const_addr = __ long_constant(n); +++ if (const_addr == NULL) { +++ bailout("const section overflow"); +++ return __ code()->consts()->start(); +++ } else { +++ return const_addr; +++ } +++} +++ +++void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) { +++ __ cmpxchg(addr, cmpval, newval, Assembler::int32, Assembler::aq /* acquire */, +++ Assembler::rl /* release */, t0, true /* result as bool */); +++ __ seqz(t0, t0); // cmpxchg not equal, set t0 to 1 +++ __ membar(MacroAssembler::AnyAny); +++} +++ +++void LIR_Assembler::caswu(Register addr, Register newval, Register cmpval) { +++ __ cmpxchg(addr, cmpval, newval, Assembler::uint32, Assembler::aq /* acquire */, +++ Assembler::rl /* release */, t0, true /* result as bool */); +++ __ seqz(t0, t0); // cmpxchg not equal, set t0 to 1 +++ __ membar(MacroAssembler::AnyAny); +++} +++ +++void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) { +++ __ cmpxchg(addr, cmpval, newval, Assembler::int64, Assembler::aq /* acquire */, +++ Assembler::rl /* release */, t0, true /* result as bool */); +++ __ seqz(t0, t0); // cmpxchg not equal, set t0 to 1 +++ __ membar(MacroAssembler::AnyAny); +++} +++ +++void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) { +++ address target = NULL; +++ +++ switch (patching_id(info)) { +++ case PatchingStub::access_field_id: +++ target = Runtime1::entry_for(Runtime1::access_field_patching_id); +++ break; +++ case PatchingStub::load_klass_id: +++ target = Runtime1::entry_for(Runtime1::load_klass_patching_id); +++ break; +++ case PatchingStub::load_mirror_id: +++ target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); +++ break; +++ case PatchingStub::load_appendix_id: +++ target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); +++ break; +++ default: ShouldNotReachHere(); +++ } +++ +++ __ far_call(RuntimeAddress(target)); +++ add_call_info_here(info); +++} +++ +++void LIR_Assembler::check_exact_klass(Register tmp, ciKlass* exact_klass) { +++ Label ok; +++ __ load_klass(tmp, tmp); +++ __ mov_metadata(t0, exact_klass->constant_encoding()); +++ __ beq(tmp, t0, ok); +++ __ stop("exact klass and actual klass differ"); +++ __ bind(ok); +++} +++ +++void LIR_Assembler::get_op(BasicType type) { +++ switch (type) { +++ case T_INT: +++ xchg = &MacroAssembler::atomic_xchgalw; +++ add = &MacroAssembler::atomic_addalw; +++ break; +++ case T_LONG: +++ xchg = &MacroAssembler::atomic_xchgal; +++ add = &MacroAssembler::atomic_addal; +++ break; +++ case T_OBJECT: +++ case T_ARRAY: +++ if (UseCompressedOops) { +++ xchg = &MacroAssembler::atomic_xchgalwu; +++ add = &MacroAssembler::atomic_addalw; +++ } else { +++ xchg = &MacroAssembler::atomic_xchgal; +++ add = &MacroAssembler::atomic_addal; +++ } +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++} +++ +++// emit_opTypeCheck sub functions +++void LIR_Assembler::typecheck_lir_store(LIR_OpTypeCheck* op, bool should_profile) { +++ Register value = 
op->object()->as_register(); +++ Register array = op->array()->as_register(); +++ Register k_RInfo = op->tmp1()->as_register(); +++ Register klass_RInfo = op->tmp2()->as_register(); +++ Register Rtmp1 = op->tmp3()->as_register(); +++ +++ CodeStub* stub = op->stub(); +++ +++ // check if it needs to be profiled +++ ciMethodData* md = NULL; +++ ciProfileData* data = NULL; +++ +++ if (should_profile) { +++ data_check(op, &md, &data); +++ } +++ Label profile_cast_success, profile_cast_failure, done; +++ Label *success_target = should_profile ? &profile_cast_success : &done; +++ Label *failure_target = should_profile ? &profile_cast_failure : stub->entry(); +++ +++ if (should_profile) { +++ profile_object(md, data, value, klass_RInfo, &done); +++ } else { +++ __ beqz(value, done); +++ } +++ +++ add_debug_info_for_null_check_here(op->info_for_exception()); +++ __ load_klass(k_RInfo, array); +++ __ load_klass(klass_RInfo, value); +++ +++ lir_store_slowcheck(k_RInfo, klass_RInfo, Rtmp1, success_target, failure_target); +++ +++ // fall through to the success case +++ if (should_profile) { +++ Register mdo = klass_RInfo; +++ Register recv = k_RInfo; +++ __ bind(profile_cast_success); +++ __ mov_metadata(mdo, md->constant_encoding()); +++ __ load_klass(recv, value); +++ type_profile_helper(mdo, md, data, recv, &done); +++ __ j(done); +++ +++ __ bind(profile_cast_failure); +++ __ mov_metadata(mdo, md->constant_encoding()); +++ Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); +++ __ ld(t1, counter_addr); +++ __ addi(t1, t1, -DataLayout::counter_increment); +++ __ sd(t1, counter_addr); +++ __ j(*stub->entry()); +++ } +++ +++ __ bind(done); +++} +++ +++void LIR_Assembler::type_profile(Register obj, ciMethodData* md, Register klass_RInfo, Register k_RInfo, +++ ciProfileData* data, Label* success, Label* failure, +++ Label& profile_cast_success, Label& profile_cast_failure) { +++ Register mdo = klass_RInfo; +++ Register recv = k_RInfo; +++ __ bind(profile_cast_success); +++ __ mov_metadata(mdo, md->constant_encoding()); +++ __ load_klass(recv, obj); +++ Label update_done; +++ type_profile_helper(mdo, md, data, recv, success); +++ __ j(*success); +++ +++ __ bind(profile_cast_failure); +++ __ mov_metadata(mdo, md->constant_encoding()); +++ Address counter_addr = __ form_address(t1, mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); +++ __ ld(t0, counter_addr); +++ __ addi(t0, t0, -DataLayout::counter_increment); +++ __ sd(t0, counter_addr); +++ __ j(*failure); +++} +++ +++void LIR_Assembler::lir_store_slowcheck(Register k_RInfo, Register klass_RInfo, Register Rtmp1, +++ Label* success_target, Label* failure_target) { +++ // get instance klass (it's already uncompressed) +++ __ ld(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); +++ // perform the fast part of the checking logic +++ __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); +++ // call out-of-line instance of __ check_klass_subtype_slow_path(...) 
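+++ // (same stack protocol as in typecheck_helper_slowcheck above)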
+++ __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo +++ __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass +++ __ sd(k_RInfo, Address(sp, 0)); // super klass +++ __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); +++ // load result to k_RInfo +++ __ ld(k_RInfo, Address(sp, 0)); +++ __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo +++ // result is a boolean +++ __ beqz(k_RInfo, *failure_target, /* is_far */ true); +++} +++ +++void LIR_Assembler::const2reg_helper(LIR_Opr src) { +++ switch (src->as_constant_ptr()->type()) { +++ case T_INT: +++ case T_ADDRESS: +++ case T_OBJECT: +++ case T_ARRAY: +++ case T_METADATA: +++ const2reg(src, FrameMap::t0_opr, lir_patch_none, NULL); +++ break; +++ case T_LONG: +++ const2reg(src, FrameMap::t0_long_opr, lir_patch_none, NULL); +++ break; +++ case T_FLOAT: +++ case T_DOUBLE: +++ default: +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::logic_op_reg32(Register dst, Register left, Register right, LIR_Code code) { +++ switch (code) { +++ case lir_logic_and: __ andrw(dst, left, right); break; +++ case lir_logic_or: __ orrw (dst, left, right); break; +++ case lir_logic_xor: __ xorrw(dst, left, right); break; +++ default: ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::logic_op_reg(Register dst, Register left, Register right, LIR_Code code) { +++ switch (code) { +++ case lir_logic_and: __ andr(dst, left, right); break; +++ case lir_logic_or: __ orr (dst, left, right); break; +++ case lir_logic_xor: __ xorr(dst, left, right); break; +++ default: ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::logic_op_imm(Register dst, Register left, int right, LIR_Code code) { +++ switch (code) { +++ case lir_logic_and: __ andi(dst, left, right); break; +++ case lir_logic_or: __ ori (dst, left, right); break; +++ case lir_logic_xor: __ xori(dst, left, right); break; +++ default: ShouldNotReachHere(); +++ } +++} +++ +++void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) { +++ assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); +++ int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; +++ assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); +++ __ sd(r, Address(sp, offset_from_rsp_in_bytes)); +++} +++ +++void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) { +++ assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); +++ int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; +++ assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); +++ __ mv(t0, c); +++ __ sd(t0, Address(sp, offset_from_rsp_in_bytes)); +++} +++ +++#undef __ ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp ++@@ -0,0 +1,130 @@ +++/* +++ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. 
+++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_C1_LIRASSEMBLER_RISCV_HPP +++#define CPU_RISCV_C1_LIRASSEMBLER_RISCV_HPP +++ +++// ArrayCopyStub needs access to bailout +++friend class ArrayCopyStub; +++ +++private: +++ +++#include "c1_LIRAssembler_arith_riscv.hpp" +++#include "c1_LIRAssembler_arraycopy_riscv.hpp" +++ +++ int array_element_size(BasicType type) const; +++ +++ static Register as_reg(LIR_Opr op) { +++ return op->is_double_cpu() ? op->as_register_lo() : op->as_register(); +++ } +++ +++ Address as_Address(LIR_Address* addr, Register tmp); +++ +++ // helper functions which checks for overflow and sets bailout if it +++ // occurs. Always returns a valid embeddable pointer but in the +++ // bailout case the pointer won't be to unique storage. +++ address float_constant(float f); +++ address double_constant(double d); +++ address int_constant(jlong n); +++ +++ // Ensure we have a valid Address (base + offset) to a stack-slot. +++ Address stack_slot_address(int index, uint shift, int adjust = 0); +++ +++ // Record the type of the receiver in ReceiverTypeData +++ void type_profile_helper(Register mdo, +++ ciMethodData *md, ciProfileData *data, +++ Register recv, Label* update_done); +++ +++ void casw(Register addr, Register newval, Register cmpval); +++ void caswu(Register addr, Register newval, Register cmpval); +++ void casl(Register addr, Register newval, Register cmpval); +++ +++ void poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info = NULL); +++ +++ void deoptimize_trap(CodeEmitInfo *info); +++ +++ enum { +++ // See emit_static_call_stub for detail +++ // CompiledStaticCall::to_interp_stub_size() (14) + CompiledStaticCall::to_trampoline_stub_size() (1 + 3 + address) +++ _call_stub_size = 14 * NativeInstruction::instruction_size + +++ (NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size), +++ // See emit_exception_handler for detail +++ // verify_not_null_oop + far_call + should_not_reach_here + invalidate_registers(DEBUG_ONLY) +++ _exception_handler_size = DEBUG_ONLY(584) NOT_DEBUG(548), // or smaller +++ // See emit_deopt_handler for detail +++ // auipc (1) + far_jump (6 or 2) +++ _deopt_handler_size = 1 * NativeInstruction::instruction_size + +++ 6 * NativeInstruction::instruction_size // or smaller +++ }; +++ +++ void check_conflict(ciKlass* exact_klass, intptr_t current_klass, Register tmp, +++ Label &next, Label &none, Address mdo_addr); +++ void check_no_conflict(ciKlass* exact_klass, intptr_t current_klass, Register tmp, Address mdo_addr, Label &next); +++ +++ void check_exact_klass(Register tmp, ciKlass* exact_klass); +++ +++ void check_null(Register tmp, Label &update, intptr_t current_klass, Address mdo_addr, bool do_update, Label &next); +++ +++ void (MacroAssembler::*add)(Register prev, 
RegisterOrConstant incr, Register addr); +++ void (MacroAssembler::*xchg)(Register prev, Register newv, Register addr); +++ +++ void get_op(BasicType type); +++ +++ // emit_typecheck_helper sub functions +++ void data_check(LIR_OpTypeCheck *op, ciMethodData **md, ciProfileData **data); +++ void typecheck_helper_slowcheck(ciKlass* k, Register obj, Register Rtmp1, +++ Register k_RInfo, Register klass_RInfo, +++ Label* failure_target, Label* success_target); +++ void profile_object(ciMethodData* md, ciProfileData* data, Register obj, +++ Register klass_RInfo, Label* obj_is_null); +++ void typecheck_loaded(LIR_OpTypeCheck* op, ciKlass* k, Register k_RInfo); +++ +++ // emit_opTypeCheck sub functions +++ void typecheck_lir_store(LIR_OpTypeCheck* op, bool should_profile); +++ +++ void type_profile(Register obj, ciMethodData* md, Register klass_RInfo, Register k_RInfo, +++ ciProfileData* data, Label* success, Label* failure, +++ Label& profile_cast_success, Label& profile_cast_failure); +++ +++ void lir_store_slowcheck(Register k_RInfo, Register klass_RInfo, Register Rtmp1, +++ Label* success_target, Label* failure_target); +++ +++ void const2reg_helper(LIR_Opr src); +++ +++ void emit_branch(LIR_Condition cmp_flag, LIR_Opr cmp1, LIR_Opr cmp2, Label& label, bool is_far, bool is_unordered); +++ +++ void logic_op_reg32(Register dst, Register left, Register right, LIR_Code code); +++ void logic_op_reg(Register dst, Register left, Register right, LIR_Code code); +++ void logic_op_imm(Register dst, Register left, int right, LIR_Code code); +++ +++public: +++ +++ void emit_cmove(LIR_Op4* op); +++ +++ void store_parameter(Register r, int offset_from_rsp_in_words); +++ void store_parameter(jint c, int offset_from_rsp_in_words); +++ +++#endif // CPU_RISCV_C1_LIRASSEMBLER_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp ++@@ -0,0 +1,1086 @@ +++/* +++ * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.inline.hpp" +++#include "c1/c1_Compilation.hpp" +++#include "c1/c1_FrameMap.hpp" +++#include "c1/c1_Instruction.hpp" +++#include "c1/c1_LIRAssembler.hpp" +++#include "c1/c1_LIRGenerator.hpp" +++#include "c1/c1_Runtime1.hpp" +++#include "c1/c1_ValueStack.hpp" +++#include "ci/ciArray.hpp" +++#include "ci/ciObjArrayKlass.hpp" +++#include "ci/ciTypeArrayKlass.hpp" +++#include "runtime/sharedRuntime.hpp" +++#include "runtime/stubRoutines.hpp" +++#include "utilities/powerOfTwo.hpp" +++#include "vmreg_riscv.inline.hpp" +++ +++#ifdef ASSERT +++#define __ gen()->lir(__FILE__, __LINE__)-> +++#else +++#define __ gen()->lir()-> +++#endif +++ +++// Item will be loaded into a byte register; Intel only +++void LIRItem::load_byte_item() { +++ load_item(); +++} +++ +++ +++void LIRItem::load_nonconstant() { +++ LIR_Opr r = value()->operand(); +++ if (r->is_constant()) { +++ _result = r; +++ } else { +++ load_item(); +++ } +++} +++ +++//-------------------------------------------------------------- +++// LIRGenerator +++//-------------------------------------------------------------- +++ +++ +++LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r10_oop_opr; } +++LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::r13_opr; } +++LIR_Opr LIRGenerator::divInOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; } +++LIR_Opr LIRGenerator::divOutOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; } +++LIR_Opr LIRGenerator::remOutOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; } +++LIR_Opr LIRGenerator::shiftCountOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; } +++LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); } +++LIR_Opr LIRGenerator::syncTempOpr() { return FrameMap::r10_opr; } +++LIR_Opr LIRGenerator::getThreadTemp() { return LIR_OprFact::illegalOpr; } +++ +++ +++LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) { +++ LIR_Opr opr; +++ switch (type->tag()) { +++ case intTag: opr = FrameMap::r10_opr; break; +++ case objectTag: opr = FrameMap::r10_oop_opr; break; +++ case longTag: opr = FrameMap::long10_opr; break; +++ case floatTag: opr = FrameMap::fpu10_float_opr; break; +++ case doubleTag: opr = FrameMap::fpu10_double_opr; break; +++ +++ case addressTag: // fall through +++ default: +++ ShouldNotReachHere(); +++ return LIR_OprFact::illegalOpr; +++ } +++ +++ assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch"); +++ return opr; +++} +++ +++ +++LIR_Opr LIRGenerator::rlock_byte(BasicType type) { +++ LIR_Opr reg = new_register(T_INT); +++ set_vreg_flag(reg, LIRGenerator::byte_reg); +++ return reg; +++} +++ +++//--------- loading items into registers -------------------------------- +++ +++ +++bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const { +++ if (v->type()->as_IntConstant() != NULL) { +++ return v->type()->as_IntConstant()->value() == 0; +++ } else if (v->type()->as_LongConstant() != NULL) { +++ return v->type()->as_LongConstant()->value() == 0; +++ } else if (v->type()->as_ObjectConstant() != NULL) { +++ return v->type()->as_ObjectConstant()->value()->is_null_object(); +++ } else if (v->type()->as_FloatConstant() != NULL) { +++ return jint_cast(v->type()->as_FloatConstant()->value()) == 0.0f; +++ } else if (v->type()->as_DoubleConstant() != NULL) { +++ return jlong_cast(v->type()->as_DoubleConstant()->value()) == 0.0; +++ } +++ return false; +++} +++ +++bool LIRGenerator::can_inline_as_constant(Value v) const 
{ +++ if (v->type()->as_IntConstant() != NULL) { +++ int value = v->type()->as_IntConstant()->value(); +++ // "-value" must be defined for value may be used for sub +++ return Assembler::is_simm12(value) && Assembler::is_simm12(- value); +++ } else if (v->type()->as_ObjectConstant() != NULL) { +++ return v->type()->as_ObjectConstant()->value()->is_null_object(); +++ } else if (v->type()->as_LongConstant() != NULL) { +++ long value = v->type()->as_LongConstant()->value(); +++ // "-value" must be defined for value may be used for sub +++ return Assembler::is_simm12(value) && Assembler::is_simm12(- value); +++ } else if (v->type()->as_FloatConstant() != NULL) { +++ return v->type()->as_FloatConstant()->value() == 0.0f; +++ } else if (v->type()->as_DoubleConstant() != NULL) { +++ return v->type()->as_DoubleConstant()->value() == 0.0; +++ } +++ return false; +++} +++ +++bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { +++ if (c->as_constant() != NULL) { +++ long constant = 0; +++ switch (c->type()) { +++ case T_INT: constant = c->as_jint(); break; +++ case T_LONG: constant = c->as_jlong(); break; +++ default: return false; +++ } +++ // "-constant" must be defined for c may be used for sub +++ return Assembler::is_simm12(constant) && Assembler::is_simm12(- constant); +++ } +++ return false; +++} +++ +++LIR_Opr LIRGenerator::safepoint_poll_register() { +++ return LIR_OprFact::illegalOpr; +++} +++ +++LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index, +++ int shift, int disp, BasicType type) { +++ assert(base->is_register(), "must be"); +++ +++ if (index->is_constant()) { +++ LIR_Const *constant = index->as_constant_ptr(); +++ jlong c; +++ if (constant->type() == T_INT) { +++ c = (jlong(index->as_jint()) << shift) + disp; +++ } else { +++ assert(constant->type() == T_LONG, "should be"); +++ c = (index->as_jlong() << shift) + disp; +++ } +++ if ((jlong)((jint)c) == c) { +++ return new LIR_Address(base, (jint)c, type); +++ } else { +++ LIR_Opr tmp = new_register(T_LONG); +++ __ move(index, tmp); +++ return new LIR_Address(base, tmp, type); +++ } +++ } +++ +++ return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type); +++} +++ +++LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, +++ BasicType type) { +++ int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type); +++ int elem_size = type2aelembytes(type); +++ int shift = exact_log2(elem_size); +++ return generate_address(array_opr, index_opr, shift, offset_in_bytes, type); +++} +++ +++LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) { +++ LIR_Opr r; +++ switch (type) { +++ case T_LONG: +++ r = LIR_OprFact::longConst(x); +++ break; +++ case T_INT: +++ r = LIR_OprFact::intConst(x); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ return r; +++} +++ +++void LIRGenerator::increment_counter(address counter, BasicType type, int step) { +++ LIR_Opr pointer = new_pointer_register(); +++ __ move(LIR_OprFact::intptrConst(counter), pointer); +++ LIR_Address* addr = new LIR_Address(pointer, type); +++ increment_counter(addr, step); +++} +++ +++void LIRGenerator::increment_counter(LIR_Address* addr, int step) { +++ LIR_Opr reg = new_register(addr->type()); +++ __ load(addr, reg); +++ __ add(reg, load_immediate(step, addr->type()), reg); +++ __ store(reg, addr); +++} +++ +++void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) { +++ LIR_Opr reg = new_register(T_INT); +++ __ load(generate_address(base, 
disp, T_INT), reg, info); +++ __ cmp(condition, reg, LIR_OprFact::intConst(c)); +++} +++ +++void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) { +++ LIR_Opr reg1 = new_register(T_INT); +++ __ load(generate_address(base, disp, type), reg1, info); +++ __ cmp(condition, reg, reg1); +++} +++ +++bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) { +++ if (tmp->is_valid() && c > 0 && c < max_jint) { +++ if (is_power_of_2(c - 1)) { +++ __ shift_left(left, exact_log2(c - 1), tmp); +++ __ add(tmp, left, result); +++ return true; +++ } else if (is_power_of_2(c + 1)) { +++ __ shift_left(left, exact_log2(c + 1), tmp); +++ __ sub(tmp, left, result); +++ return true; +++ } +++ } +++ return false; +++} +++ +++void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) { +++ BasicType type = item->type(); +++ __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type)); +++} +++ +++void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, +++ ciMethod* profiled_method, int profiled_bci) { +++ LIR_Opr tmp1 = new_register(objectType); +++ LIR_Opr tmp2 = new_register(objectType); +++ LIR_Opr tmp3 = new_register(objectType); +++ __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci); +++} +++ +++//---------------------------------------------------------------------- +++// visitor functions +++//---------------------------------------------------------------------- +++ +++void LIRGenerator::do_MonitorEnter(MonitorEnter* x) { +++ assert(x->is_pinned(), ""); +++ LIRItem obj(x->obj(), this); +++ obj.load_item(); +++ +++ set_no_result(x); +++ +++ // "lock" stores the address of the monitor stack slot, so this is not an oop +++ LIR_Opr lock = new_register(T_INT); +++ // Need a tmp register for biased locking +++ LIR_Opr tmp = LIR_OprFact::illegalOpr; +++ if (UseBiasedLocking) { +++ tmp = new_register(T_INT); +++ } +++ +++ CodeEmitInfo* info_for_exception = NULL; +++ if (x->needs_null_check()) { +++ info_for_exception = state_for(x); +++ } +++ // this CodeEmitInfo must not have the xhandlers because here the +++ // object is already locked (xhandlers expect object to be unlocked) +++ CodeEmitInfo* info = state_for(x, x->state(), true); +++ monitor_enter(obj.result(), lock, syncTempOpr(), tmp, +++ x->monitor_no(), info_for_exception, info); +++} +++ +++void LIRGenerator::do_MonitorExit(MonitorExit* x) { +++ assert(x->is_pinned(), ""); +++ +++ LIRItem obj(x->obj(), this); +++ obj.dont_load_item(); +++ +++ LIR_Opr lock = new_register(T_INT); +++ LIR_Opr obj_temp = new_register(T_INT); +++ set_no_result(x); +++ monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no()); +++} +++ +++// neg +++void LIRGenerator::do_NegateOp(NegateOp* x) { +++ LIRItem from(x->x(), this); +++ from.load_item(); +++ LIR_Opr result = rlock_result(x); +++ __ negate(from.result(), result); +++} +++ +++// for _fadd, _fmul, _fsub, _fdiv, _frem +++// _dadd, _dmul, _dsub, _ddiv, _drem +++void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) { +++ LIRItem left(x->x(), this); +++ LIRItem right(x->y(), this); +++ +++ if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) { +++ +++ // float remainder is implemented as a direct call into the runtime +++ BasicTypeList signature(2); +++ if (x->op() == Bytecodes::_frem) { +++ signature.append(T_FLOAT); +++ 
signature.append(T_FLOAT); +++ } else { +++ signature.append(T_DOUBLE); +++ signature.append(T_DOUBLE); +++ } +++ CallingConvention* cc = frame_map()->c_calling_convention(&signature); +++ +++ const LIR_Opr result_reg = result_register_for(x->type()); +++ +++ left.load_item(); +++ __ move(left.result(), cc->at(0)); +++ right.load_item_force(cc->at(1)); +++ +++ address entry; +++ if (x->op() == Bytecodes::_frem) { +++ entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem); +++ } else { +++ entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem); +++ } +++ +++ LIR_Opr result = rlock_result(x); +++ __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args()); +++ __ move(result_reg, result); +++ +++ return; +++ } +++ +++ if (!left.is_register()) { +++ left.load_item(); +++ } +++ // Always load right hand side. +++ right.load_item(); +++ +++ LIR_Opr reg = rlock(x); +++ arithmetic_op_fpu(x->op(), reg, left.result(), right.result()); +++ +++ set_result(x, round_item(reg)); +++} +++ +++// for _ladd, _lmul, _lsub, _ldiv, _lrem +++void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) { +++ +++ // missing test if instr is commutative and if we should swap +++ LIRItem left(x->x(), this); +++ LIRItem right(x->y(), this); +++ +++ if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) { +++ +++ left.load_item(); +++ +++ bool need_zero_check = true; +++ if (right.is_constant()) { +++ jlong c = right.get_jlong_constant(); +++ // no need to do div-by-zero check if the divisor is a non-zero constant +++ if (c != 0) { need_zero_check = false; } +++ // do not load right if the divisor is a power-of-2 constant +++ if (c > 0 && is_power_of_2(c)) { +++ right.dont_load_item(); +++ } else { +++ right.load_item(); +++ } +++ } else { +++ right.load_item(); +++ } +++ if (need_zero_check) { +++ CodeEmitInfo* info = state_for(x); +++ __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0)); +++ __ branch(lir_cond_equal, new DivByZeroStub(info)); +++ } +++ +++ rlock_result(x); +++ switch (x->op()) { +++ case Bytecodes::_lrem: +++ __ rem(left.result(), right.result(), x->operand()); +++ break; +++ case Bytecodes::_ldiv: +++ __ div(left.result(), right.result(), x->operand()); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ } else { +++ assert(x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, +++ "expect lmul, ladd or lsub"); +++ // add, sub, mul +++ left.load_item(); +++ if (!right.is_register()) { +++ if (x->op() == Bytecodes::_lmul || +++ !right.is_constant() || +++ (x->op() == Bytecodes::_ladd && +++ !Assembler::is_simm12(right.get_jlong_constant())) || +++ (x->op() == Bytecodes::_lsub && +++ !Assembler::is_simm12(-right.get_jlong_constant()))) { +++ right.load_item(); +++ } else { // add, sub +++ assert(x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expected ladd or lsub"); +++ // don't load constants to save register +++ right.load_nonconstant(); +++ } +++ } +++ rlock_result(x); +++ arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL); +++ } +++} +++ +++// for: _iadd, _imul, _isub, _idiv, _irem +++void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) { +++ +++ // Test if instr is commutative and if we should swap +++ LIRItem left(x->x(), this); +++ LIRItem right(x->y(), this); +++ LIRItem* left_arg = &left; +++ LIRItem* right_arg = &right; +++ if (x->is_commutative() && left.is_stack() && right.is_register()) { +++ // swap them if left is real stack (or cached) and right is real register(not 
cached) +++ left_arg = &right; +++ right_arg = &left; +++ } +++ left_arg->load_item(); +++ // do not need to load right, as we can handle stack and constants +++ if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) { +++ +++ rlock_result(x); +++ +++ bool need_zero_check = true; +++ if (right.is_constant()) { +++ jint c = right.get_jint_constant(); +++ // no need to do div-by-zero check if the divisor is a non-zero constant +++ if (c != 0) { need_zero_check = false; } +++ // do not load right if the divisor is a power-of-2 constant +++ if (c > 0 && is_power_of_2(c)) { +++ right_arg->dont_load_item(); +++ } else { +++ right_arg->load_item(); +++ } +++ } else { +++ right_arg->load_item(); +++ } +++ if (need_zero_check) { +++ CodeEmitInfo* info = state_for(x); +++ __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0)); +++ __ branch(lir_cond_equal, new DivByZeroStub(info)); +++ } +++ +++ LIR_Opr ill = LIR_OprFact::illegalOpr; +++ if (x->op() == Bytecodes::_irem) { +++ __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL); +++ } else if (x->op() == Bytecodes::_idiv) { +++ __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL); +++ } +++ +++ } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) { +++ if (right.is_constant() && +++ ((x->op() == Bytecodes::_iadd && !Assembler::is_simm12(right.get_jint_constant())) || +++ (x->op() == Bytecodes::_isub && !Assembler::is_simm12(-right.get_jint_constant())))) { +++ right.load_nonconstant(); +++ } else { +++ right.load_item(); +++ } +++ rlock_result(x); +++ arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr); +++ } else { +++ assert (x->op() == Bytecodes::_imul, "expect imul"); +++ if (right.is_constant()) { +++ jint c = right.get_jint_constant(); +++ if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) { +++ right_arg->dont_load_item(); +++ } else { +++ // Cannot use constant op. 
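+++ // (the is_power_of_2 checks above mirror strength_reduce_multiply earlier
+++ // in this file, which turns 2^n - 1 and 2^n + 1 multipliers into a
+++ // shift plus add/sub; any other multiplier must sit in a register)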
+++ right_arg->load_item(); +++ } +++ } else { +++ right.load_item(); +++ } +++ rlock_result(x); +++ arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT)); +++ } +++} +++ +++void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) { +++ // when an operand with use count 1 is the left operand, then it is +++ // likely that no move for 2-operand-LIR-form is necessary +++ if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) { +++ x->swap_operands(); +++ } +++ +++ ValueTag tag = x->type()->tag(); +++ assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters"); +++ switch (tag) { +++ case floatTag: +++ case doubleTag: do_ArithmeticOp_FPU(x); return; +++ case longTag: do_ArithmeticOp_Long(x); return; +++ case intTag: do_ArithmeticOp_Int(x); return; +++ default: ShouldNotReachHere(); return; +++ } +++} +++ +++// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr +++void LIRGenerator::do_ShiftOp(ShiftOp* x) { +++ LIRItem value(x->x(), this); +++ LIRItem count(x->y(), this); +++ +++ value.load_item(); +++ if (count.is_constant()) { +++ assert(count.type()->as_IntConstant() != NULL || count.type()->as_LongConstant() != NULL , "should be"); +++ count.dont_load_item(); +++ } else { +++ count.load_item(); +++ } +++ +++ LIR_Opr res = rlock_result(x); +++ shift_op(x->op(), res, value.result(), count.result(), LIR_OprFact::illegalOpr); +++} +++ +++ +++// _iand, _land, _ior, _lor, _ixor, _lxor +++void LIRGenerator::do_LogicOp(LogicOp* x) { +++ +++ LIRItem left(x->x(), this); +++ LIRItem right(x->y(), this); +++ +++ left.load_item(); +++ rlock_result(x); +++ ValueTag tag = right.type()->tag(); +++ if (right.is_constant() && +++ ((tag == longTag && Assembler::is_simm12(right.get_jlong_constant())) || +++ (tag == intTag && Assembler::is_simm12(right.get_jint_constant())))) { +++ right.dont_load_item(); +++ } else { +++ right.load_item(); +++ } +++ +++ switch (x->op()) { +++ case Bytecodes::_iand: // fall through +++ case Bytecodes::_land: +++ __ logical_and(left.result(), right.result(), x->operand()); break; +++ case Bytecodes::_ior: // fall through +++ case Bytecodes::_lor: +++ __ logical_or(left.result(), right.result(), x->operand()); break; +++ case Bytecodes::_ixor: // fall through +++ case Bytecodes::_lxor: +++ __ logical_xor(left.result(), right.result(), x->operand()); break; +++ default: Unimplemented(); +++ } +++} +++ +++// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg +++void LIRGenerator::do_CompareOp(CompareOp* x) { +++ LIRItem left(x->x(), this); +++ LIRItem right(x->y(), this); +++ ValueTag tag = x->x()->type()->tag(); +++ if (tag == longTag) { +++ left.set_destroys_register(); +++ } +++ left.load_item(); +++ right.load_item(); +++ LIR_Opr reg = rlock_result(x); +++ +++ if (x->x()->type()->is_float_kind()) { +++ Bytecodes::Code code = x->op(); +++ __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl)); +++ } else if (x->x()->type()->tag() == longTag) { +++ __ lcmp2int(left.result(), right.result(), reg); +++ } else { +++ Unimplemented(); +++ } +++} +++ +++LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) { +++ LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience +++ new_value.load_item(); +++ cmp_value.load_item(); +++ LIR_Opr result = new_register(T_INT); +++ if (is_reference_type(type)) { +++ __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), 
new_register(T_INT), result); +++ } else if (type == T_INT) { +++ __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill); +++ } else if (type == T_LONG) { +++ __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill); +++ } else { +++ ShouldNotReachHere(); +++ } +++ __ logical_xor(FrameMap::r5_opr, LIR_OprFact::intConst(1), result); +++ return result; +++} +++ +++LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) { +++ bool is_oop = is_reference_type(type); +++ LIR_Opr result = new_register(type); +++ value.load_item(); +++ assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type"); +++ LIR_Opr tmp = new_register(T_INT); +++ __ xchg(addr, value.result(), result, tmp); +++ return result; +++} +++ +++LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) { +++ LIR_Opr result = new_register(type); +++ value.load_item(); +++ assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type"); +++ LIR_Opr tmp = new_register(T_INT); +++ __ xadd(addr, value.result(), result, tmp); +++ return result; +++} +++ +++void LIRGenerator::do_MathIntrinsic(Intrinsic* x) { +++ assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), +++ "wrong type"); +++ +++ switch (x->id()) { +++ case vmIntrinsics::_dexp: // fall through +++ case vmIntrinsics::_dlog: // fall through +++ case vmIntrinsics::_dpow: // fall through +++ case vmIntrinsics::_dcos: // fall through +++ case vmIntrinsics::_dsin: // fall through +++ case vmIntrinsics::_dtan: // fall through +++ case vmIntrinsics::_dlog10: +++ do_LibmIntrinsic(x); +++ break; +++ case vmIntrinsics::_dabs: // fall through +++ case vmIntrinsics::_dsqrt: { +++ assert(x->number_of_arguments() == 1, "wrong type"); +++ LIRItem value(x->argument_at(0), this); +++ value.load_item(); +++ LIR_Opr dst = rlock_result(x); +++ +++ switch (x->id()) { +++ case vmIntrinsics::_dsqrt: { +++ __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr); +++ break; +++ } +++ case vmIntrinsics::_dabs: { +++ __ abs(value.result(), dst, LIR_OprFact::illegalOpr); +++ break; +++ } +++ default: +++ ShouldNotReachHere(); +++ } +++ break; +++ } +++ default: +++ ShouldNotReachHere(); +++ } +++} +++ +++void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) { +++ LIRItem value(x->argument_at(0), this); +++ value.set_destroys_register(); +++ +++ LIR_Opr calc_result = rlock_result(x); +++ LIR_Opr result_reg = result_register_for(x->type()); +++ +++ CallingConvention* cc = NULL; +++ +++ if (x->id() == vmIntrinsics::_dpow) { +++ LIRItem value1(x->argument_at(1), this); +++ +++ value1.set_destroys_register(); +++ +++ BasicTypeList signature(2); +++ signature.append(T_DOUBLE); +++ signature.append(T_DOUBLE); +++ cc = frame_map()->c_calling_convention(&signature); +++ value.load_item_force(cc->at(0)); +++ value1.load_item_force(cc->at(1)); +++ } else { +++ BasicTypeList signature(1); +++ signature.append(T_DOUBLE); +++ cc = frame_map()->c_calling_convention(&signature); +++ value.load_item_force(cc->at(0)); +++ } +++ +++ switch (x->id()) { +++ case vmIntrinsics::_dexp: +++ if (StubRoutines::dexp() != NULL) { __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args()); } +++ else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args()); } +++ break; +++ case vmIntrinsics::_dlog: +++ if (StubRoutines::dlog() != NULL) { __ 
call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args()); } +++ else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args()); } +++ break; +++ case vmIntrinsics::_dlog10: +++ if (StubRoutines::dlog10() != NULL) { __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args()); } +++ else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args()); } +++ break; +++ case vmIntrinsics::_dsin: +++ if (StubRoutines::dsin() != NULL) { __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args()); } +++ else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args()); } +++ break; +++ case vmIntrinsics::_dcos: +++ if (StubRoutines::dcos() != NULL) { __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args()); } +++ else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args()); } +++ break; +++ case vmIntrinsics::_dtan: +++ if (StubRoutines::dtan() != NULL) { __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args()); } +++ else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args()); } +++ break; +++ case vmIntrinsics::_dpow: +++ if (StubRoutines::dpow() != NULL) { __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args()); } +++ else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args()); } +++ break; +++ default: ShouldNotReachHere(); +++ } +++ __ move(result_reg, calc_result); +++} +++ +++ +++void LIRGenerator::do_ArrayCopy(Intrinsic* x) { +++ assert(x->number_of_arguments() == 5, "wrong type"); +++ +++ // Make all state_for calls early since they can emit code +++ CodeEmitInfo* info = state_for(x, x->state()); +++ +++ LIRItem src(x->argument_at(0), this); +++ LIRItem src_pos(x->argument_at(1), this); +++ LIRItem dst(x->argument_at(2), this); +++ LIRItem dst_pos(x->argument_at(3), this); +++ LIRItem length(x->argument_at(4), this); +++ +++ // operands for arraycopy must use fixed registers, otherwise +++ // LinearScan will fail allocation (because arraycopy always needs a +++ // call) +++ +++ // The java calling convention will give us enough registers +++ // so that on the stub side the args will be perfect already. +++ // On the other slow/special case side we call C and the arg +++ // positions are not similar enough to pick one as the best. 
+++ // Also because the java calling convention is a "shifted" version +++ // of the C convention we can process the java args trivially into C +++ // args without worry of overwriting during the xfer +++ +++ src.load_item_force (FrameMap::as_oop_opr(j_rarg0)); +++ src_pos.load_item_force (FrameMap::as_opr(j_rarg1)); +++ dst.load_item_force (FrameMap::as_oop_opr(j_rarg2)); +++ dst_pos.load_item_force (FrameMap::as_opr(j_rarg3)); +++ length.load_item_force (FrameMap::as_opr(j_rarg4)); +++ +++ LIR_Opr tmp = FrameMap::as_opr(j_rarg5); +++ +++ set_no_result(x); +++ +++ int flags; +++ ciArrayKlass* expected_type = NULL; +++ arraycopy_helper(x, &flags, &expected_type); +++ +++ __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, +++ expected_type, flags, info); // does add_safepoint +++} +++ +++void LIRGenerator::do_update_CRC32(Intrinsic* x) { +++ ShouldNotReachHere(); +++} +++ +++void LIRGenerator::do_update_CRC32C(Intrinsic* x) { +++ ShouldNotReachHere(); +++} +++ +++void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) { +++ assert(x->number_of_arguments() == 3, "wrong type"); +++ assert(UseFMA, "Needs FMA instructions support."); +++ LIRItem value(x->argument_at(0), this); +++ LIRItem value1(x->argument_at(1), this); +++ LIRItem value2(x->argument_at(2), this); +++ +++ value.load_item(); +++ value1.load_item(); +++ value2.load_item(); +++ +++ LIR_Opr calc_input = value.result(); +++ LIR_Opr calc_input1 = value1.result(); +++ LIR_Opr calc_input2 = value2.result(); +++ LIR_Opr calc_result = rlock_result(x); +++ +++ switch (x->id()) { +++ case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break; +++ case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break; +++ default: ShouldNotReachHere(); +++ } +++} +++ +++void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) { +++ fatal("vectorizedMismatch intrinsic is not implemented on this platform"); +++} +++ +++// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f +++// _i2b, _i2c, _i2s +++void LIRGenerator::do_Convert(Convert* x) { +++ LIRItem value(x->value(), this); +++ value.load_item(); +++ LIR_Opr input = value.result(); +++ LIR_Opr result = rlock(x); +++ +++ // arguments of lir_convert +++ LIR_Opr conv_input = input; +++ LIR_Opr conv_result = result; +++ +++ __ convert(x->op(), conv_input, conv_result); +++ +++ assert(result->is_virtual(), "result must be virtual register"); +++ set_result(x, result); +++} +++ +++void LIRGenerator::do_NewInstance(NewInstance* x) { +++#ifndef PRODUCT +++ if (PrintNotLoaded && !x->klass()->is_loaded()) { +++ tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci()); +++ } +++#endif +++ CodeEmitInfo* info = state_for(x, x->state()); +++ LIR_Opr reg = result_register_for(x->type()); +++ new_instance(reg, x->klass(), x->is_unresolved(), +++ FrameMap::r12_oop_opr, +++ FrameMap::r15_oop_opr, +++ FrameMap::r14_oop_opr, +++ LIR_OprFact::illegalOpr, +++ FrameMap::r13_metadata_opr, +++ info); +++ LIR_Opr result = rlock_result(x); +++ __ move(reg, result); +++} +++ +++void LIRGenerator::do_NewTypeArray(NewTypeArray* x) { +++ CodeEmitInfo* info = state_for(x, x->state()); +++ +++ LIRItem length(x->length(), this); +++ length.load_item_force(FrameMap::r9_opr); +++ +++ LIR_Opr reg = result_register_for(x->type()); +++ LIR_Opr tmp1 = FrameMap::r12_oop_opr; +++ LIR_Opr tmp2 = FrameMap::r14_oop_opr; +++ LIR_Opr tmp3 = FrameMap::r15_oop_opr; +++ LIR_Opr tmp4 = reg; +++ LIR_Opr klass_reg 
= FrameMap::r13_metadata_opr; +++ LIR_Opr len = length.result(); +++ BasicType elem_type = x->elt_type(); +++ +++ __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg); +++ +++ CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info); +++ __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path); +++ +++ LIR_Opr result = rlock_result(x); +++ __ move(reg, result); +++} +++ +++void LIRGenerator::do_NewObjectArray(NewObjectArray* x) { +++ LIRItem length(x->length(), this); +++ // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction +++ // and therefore provide the state before the parameters have been consumed +++ CodeEmitInfo* patching_info = NULL; +++ if (!x->klass()->is_loaded() || PatchALot) { +++ patching_info = state_for(x, x->state_before()); +++ } +++ +++ CodeEmitInfo* info = state_for(x, x->state()); +++ +++ LIR_Opr reg = result_register_for(x->type()); +++ LIR_Opr tmp1 = FrameMap::r12_oop_opr; +++ LIR_Opr tmp2 = FrameMap::r14_oop_opr; +++ LIR_Opr tmp3 = FrameMap::r15_oop_opr; +++ LIR_Opr tmp4 = reg; +++ LIR_Opr klass_reg = FrameMap::r13_metadata_opr; +++ +++ length.load_item_force(FrameMap::r9_opr); +++ LIR_Opr len = length.result(); +++ +++ CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info); +++ ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass()); +++ if (obj == ciEnv::unloaded_ciobjarrayklass()) { +++ BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error"); +++ } +++ klass2reg_with_patching(klass_reg, obj, patching_info); +++ __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path); +++ +++ LIR_Opr result = rlock_result(x); +++ __ move(reg, result); +++} +++ +++ +++void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { +++ Values* dims = x->dims(); +++ int i = dims->length(); +++ LIRItemList* items = new LIRItemList(i, i, NULL); +++ while (i-- > 0) { +++ LIRItem* size = new LIRItem(dims->at(i), this); +++ items->at_put(i, size); +++ } +++ +++ // Evaluate state_for early since it may emit code. +++ CodeEmitInfo* patching_info = NULL; +++ if (!x->klass()->is_loaded() || PatchALot) { +++ patching_info = state_for(x, x->state_before()); +++ +++ // Cannot re-use same xhandlers for multiple CodeEmitInfos, so +++ // clone all handlers (NOTE: Usually this is handled transparently +++ // by the CodeEmitInfo cloning logic in CodeStub constructors but +++ // is done explicitly here because a stub isn't being used). 
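+++ // Both the patching_info created here and the info created below would
+++ // otherwise end up referencing the same handler list.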
+++ x->set_exception_handlers(new XHandlers(x->exception_handlers())); +++ } +++ CodeEmitInfo* info = state_for(x, x->state()); +++ +++ i = dims->length(); +++ while (i-- > 0) { +++ LIRItem* size = items->at(i); +++ size->load_item(); +++ +++ store_stack_parameter(size->result(), in_ByteSize(i * BytesPerInt)); +++ } +++ +++ LIR_Opr klass_reg = FrameMap::r10_metadata_opr; +++ klass2reg_with_patching(klass_reg, x->klass(), patching_info); +++ +++ LIR_Opr rank = FrameMap::r9_opr; +++ __ move(LIR_OprFact::intConst(x->rank()), rank); +++ LIR_Opr varargs = FrameMap::r12_opr; +++ __ move(FrameMap::sp_opr, varargs); +++ LIR_OprList* args = new LIR_OprList(3); +++ args->append(klass_reg); +++ args->append(rank); +++ args->append(varargs); +++ LIR_Opr reg = result_register_for(x->type()); +++ __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id), +++ LIR_OprFact::illegalOpr, +++ reg, args, info); +++ +++ LIR_Opr result = rlock_result(x); +++ __ move(reg, result); +++} +++ +++void LIRGenerator::do_BlockBegin(BlockBegin* x) { +++ // nothing to do for now +++} +++ +++void LIRGenerator::do_CheckCast(CheckCast* x) { +++ LIRItem obj(x->obj(), this); +++ +++ CodeEmitInfo* patching_info = NULL; +++ if (!x->klass()->is_loaded() || +++ (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) { +++ // must do this before locking the destination register as an oop register, +++ // and before the obj is loaded (the latter is for deoptimization) +++ patching_info = state_for(x, x->state_before()); +++ } +++ obj.load_item(); +++ +++ // info for exceptions +++ CodeEmitInfo* info_for_exception = +++ (x->needs_exception_state() ? state_for(x) : +++ state_for(x, x->state_before(), true /*ignore_xhandler*/ )); +++ +++ CodeStub* stub = NULL; +++ if (x->is_incompatible_class_change_check()) { +++ assert(patching_info == NULL, "can't patch this"); +++ stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, +++ info_for_exception); +++ } else if (x->is_invokespecial_receiver_check()) { +++ assert(patching_info == NULL, "can't patch this"); +++ stub = new DeoptimizeStub(info_for_exception, +++ Deoptimization::Reason_class_check, +++ Deoptimization::Action_none); +++ } else { +++ stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception); +++ } +++ LIR_Opr reg = rlock_result(x); +++ LIR_Opr tmp3 = LIR_OprFact::illegalOpr; +++ if (!x->klass()->is_loaded() || UseCompressedClassPointers) { +++ tmp3 = new_register(objectType); +++ } +++ __ checkcast(reg, obj.result(), x->klass(), +++ new_register(objectType), new_register(objectType), tmp3, +++ x->direct_compare(), info_for_exception, patching_info, stub, +++ x->profiled_method(), x->profiled_bci()); +++} +++ +++void LIRGenerator::do_InstanceOf(InstanceOf* x) { +++ LIRItem obj(x->obj(), this); +++ +++ // result and test object may not be in same register +++ LIR_Opr reg = rlock_result(x); +++ CodeEmitInfo* patching_info = NULL; +++ if ((!x->klass()->is_loaded() || PatchALot)) { +++ // must do this before locking the destination register as an oop register +++ patching_info = state_for(x, x->state_before()); +++ } +++ obj.load_item(); +++ LIR_Opr tmp3 = LIR_OprFact::illegalOpr; +++ if (!x->klass()->is_loaded() || UseCompressedClassPointers) { +++ tmp3 = new_register(objectType); +++ } +++ __ instanceof(reg, obj.result(), x->klass(), +++ new_register(objectType), new_register(objectType), tmp3, +++ x->direct_compare(), 
patching_info, x->profiled_method(), x->profiled_bci()); +++} +++ +++void LIRGenerator::do_If(If* x) { +++ // If should have two successors +++ assert(x->number_of_sux() == 2, "inconsistency"); +++ ValueTag tag = x->x()->type()->tag(); +++ bool is_safepoint = x->is_safepoint(); +++ +++ If::Condition cond = x->cond(); +++ +++ LIRItem xitem(x->x(), this); +++ LIRItem yitem(x->y(), this); +++ LIRItem* xin = &xitem; +++ LIRItem* yin = &yitem; +++ +++ if (tag == longTag) { +++ // for longs, only conditions "eql", "neq", "lss", "geq" are valid; +++ // mirror for other conditions +++ if (cond == If::gtr || cond == If::leq) { +++ cond = Instruction::mirror(cond); +++ xin = &yitem; +++ yin = &xitem; +++ } +++ xin->set_destroys_register(); +++ } +++ xin->load_item(); +++ yin->load_item(); +++ +++ set_no_result(x); +++ +++ LIR_Opr left = xin->result(); +++ LIR_Opr right = yin->result(); +++ +++ // add safepoint before generating condition code so it can be recomputed +++ if (x->is_safepoint()) { +++ // increment backedge counter if needed +++ increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()), +++ x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci()); +++ __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before())); +++ } +++ +++ // Generate branch profiling. Profiling code doesn't kill flags. +++ __ cmp(lir_cond(cond), left, right); +++ profile_branch(x, cond); +++ move_to_phi(x->state()); +++ if (x->x()->type()->is_float_kind()) { +++ __ branch(lir_cond(cond), x->tsux(), x->usux()); +++ } else { +++ __ branch(lir_cond(cond), x->tsux()); +++ } +++ assert(x->default_sux() == x->fsux(), "wrong destination above"); +++ __ jump(x->default_sux()); +++} +++ +++LIR_Opr LIRGenerator::getThreadPointer() { +++ return FrameMap::as_pointer_opr(xthread); +++} +++ +++void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); } +++ +++void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address, +++ CodeEmitInfo* info) { +++ __ volatile_store_mem_reg(value, address, info); +++} +++ +++void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result, +++ CodeEmitInfo* info) { +++ __ volatile_load_mem_reg(address, result, info); +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_LIR_riscv.cpp ++@@ -0,0 +1,55 @@ +++/* +++ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/register.hpp" +++#include "c1/c1_LIR.hpp" +++ +++FloatRegister LIR_OprDesc::as_float_reg() const { +++ return as_FloatRegister(fpu_regnr()); +++} +++ +++FloatRegister LIR_OprDesc::as_double_reg() const { +++ return as_FloatRegister(fpu_regnrLo()); +++} +++ +++// Reg2 unused. +++LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) { +++ assert(as_FloatRegister(reg2) == fnoreg, "Not used on this platform"); +++ return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) | +++ (reg1 << LIR_OprDesc::reg2_shift) | +++ LIR_OprDesc::double_type | +++ LIR_OprDesc::fpu_register | +++ LIR_OprDesc::double_size); +++} +++ +++#ifndef PRODUCT +++void LIR_Address::verify() const { +++ assert(base()->is_cpu_register(), "wrong base operand"); +++ assert(index()->is_illegal() || index()->is_double_cpu() || index()->is_single_cpu(), "wrong index operand"); +++ assert(base()->type() == T_ADDRESS || base()->type() == T_OBJECT || base()->type() == T_LONG || +++ base()->type() == T_METADATA, "wrong type for addresses"); +++} +++#endif // PRODUCT ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_LinearScan_riscv.cpp ++@@ -0,0 +1,33 @@ +++/* +++ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "c1/c1_Instruction.hpp" +++#include "c1/c1_LinearScan.hpp" +++#include "utilities/bitMap.inline.hpp" +++ +++void LinearScan::allocate_fpu_stack() { +++ // No FPU stack on RISCV +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_LinearScan_riscv.hpp ++@@ -0,0 +1,83 @@ +++/* +++ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_C1_LINEARSCAN_RISCV_HPP +++#define CPU_RISCV_C1_LINEARSCAN_RISCV_HPP +++ +++inline bool LinearScan::is_processed_reg_num(int reg_num) +++{ +++ return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map; +++} +++ +++inline int LinearScan::num_physical_regs(BasicType type) { +++ return 1; +++} +++ +++inline bool LinearScan::requires_adjacent_regs(BasicType type) { +++ return false; +++} +++ +++inline bool LinearScan::is_caller_save(int assigned_reg) { +++ assert(assigned_reg >= 0 && assigned_reg < nof_regs, "should call this only for registers"); +++ if (assigned_reg < pd_first_callee_saved_reg) { +++ return true; +++ } +++ if (assigned_reg > pd_last_callee_saved_reg && assigned_reg < pd_first_callee_saved_fpu_reg_1) { +++ return true; +++ } +++ if (assigned_reg > pd_last_callee_saved_fpu_reg_1 && assigned_reg < pd_first_callee_saved_fpu_reg_2) { +++ return true; +++ } +++ if (assigned_reg > pd_last_callee_saved_fpu_reg_2 && assigned_reg < pd_last_fpu_reg) { +++ return true; +++ } +++ return false; +++} +++ +++inline void LinearScan::pd_add_temps(LIR_Op* op) { +++ // No special case behaviours yet +++} +++ +++ +++// Implementation of LinearScanWalker +++ +++inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) +++{ +++ if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::callee_saved)) { +++ assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only"); +++ _first_reg = pd_first_callee_saved_reg; +++ _last_reg = pd_last_callee_saved_reg; +++ return true; +++ } else if (cur->type() == T_INT || cur->type() == T_LONG || cur->type() == T_OBJECT || +++ cur->type() == T_ADDRESS || cur->type() == T_METADATA) { +++ _first_reg = pd_first_cpu_reg; +++ _last_reg = pd_last_allocatable_cpu_reg; +++ return true; +++ } +++ return false; +++} +++ +++#endif // CPU_RISCV_C1_LINEARSCAN_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp ++@@ -0,0 +1,451 @@ +++/* +++ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). 
+++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "c1/c1_LIR.hpp" +++#include "c1/c1_MacroAssembler.hpp" +++#include "c1/c1_Runtime1.hpp" +++#include "classfile/systemDictionary.hpp" +++#include "gc/shared/barrierSetAssembler.hpp" +++#include "gc/shared/collectedHeap.hpp" +++#include "interpreter/interpreter.hpp" +++#include "oops/arrayOop.hpp" +++#include "oops/markWord.hpp" +++#include "runtime/basicLock.hpp" +++#include "runtime/os.hpp" +++#include "runtime/sharedRuntime.hpp" +++#include "runtime/stubRoutines.hpp" +++ +++void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result, +++ FloatRegister freg0, FloatRegister freg1, +++ Register result) { +++ if (is_float) { +++ float_compare(result, freg0, freg1, unordered_result); +++ } else { +++ double_compare(result, freg0, freg1, unordered_result); +++ } +++} +++ +++int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register tmp, Label& slow_case) { +++ const int aligned_mask = BytesPerWord - 1; +++ const int hdr_offset = oopDesc::mark_offset_in_bytes(); +++ assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different"); +++ Label done; +++ int null_check_offset = -1; +++ +++ verify_oop(obj); +++ +++ // save object being locked into the BasicObjectLock +++ sd(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes())); +++ +++ null_check_offset = offset(); +++ +++ if (DiagnoseSyncOnValueBasedClasses != 0) { +++ load_klass(hdr, obj); +++ lwu(hdr, Address(hdr, Klass::access_flags_offset())); +++ test_bit(t0, hdr, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS)); +++ bnez(t0, slow_case, true /* is_far */); +++ } +++ +++ if (UseBiasedLocking) { +++ assert(tmp != noreg, "should have tmp register at this point"); +++ biased_locking_enter(disp_hdr, obj, hdr, tmp, false, done, &slow_case); +++ } +++ +++ // Load object header +++ ld(hdr, Address(obj, hdr_offset)); +++ // and mark it as unlocked +++ ori(hdr, hdr, markWord::unlocked_value); +++ // save unlocked object header into the displaced header location on the stack +++ sd(hdr, Address(disp_hdr, 0)); +++ // test if object header is still the same (i.e. 
unlocked), and if so, store the
+++ // displaced header address in the object header - if it is not the same, get the
+++ // object header instead
+++ la(t1, Address(obj, hdr_offset));
+++ cmpxchgptr(hdr, disp_hdr, t1, t0, done, /*fallthrough*/NULL);
+++ // if the object header was the same, we're done
+++ // if the object header was not the same, it is now in the hdr register
+++ // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
+++ //
+++ // 1) (hdr & aligned_mask) == 0
+++ // 2) sp <= hdr
+++ // 3) hdr <= sp + page_size
+++ //
+++ // these 3 tests can be done by evaluating the following expression:
+++ //
+++ // (hdr - sp) & (aligned_mask - page_size)
+++ //
+++ // assuming both the stack pointer and page_size have their least
+++ // significant 2 bits cleared and page_size is a power of 2
+++ sub(hdr, hdr, sp);
+++ mv(t0, aligned_mask - os::vm_page_size());
+++ andr(hdr, hdr, t0);
+++ // for recursive locking, the result is zero => save it in the displaced header
+++ // location (NULL in the displaced hdr location indicates recursive locking)
+++ sd(hdr, Address(disp_hdr, 0));
+++ // otherwise we don't care about the result and handle locking via runtime call
+++ bnez(hdr, slow_case, /* is_far */ true);
+++ bind(done);
+++ return null_check_offset;
+++}
+++
+++void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
+++ const int aligned_mask = BytesPerWord - 1;
+++ const int hdr_offset = oopDesc::mark_offset_in_bytes();
+++ assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
+++ Label done;
+++
+++ if (UseBiasedLocking) {
+++ // load object
+++ ld(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
+++ biased_locking_exit(obj, hdr, done);
+++ }
+++
+++ // load displaced header
+++ ld(hdr, Address(disp_hdr, 0));
+++ // if the loaded hdr is NULL we had recursive locking
+++ // if we had recursive locking, we are done
+++ beqz(hdr, done);
+++ if (!UseBiasedLocking) {
+++ // load object
+++ ld(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
+++ }
+++ verify_oop(obj);
+++ // test if object header is pointing to the displaced header, and if so, restore
+++ // the displaced header in the object - if the object header is not pointing to
+++ // the displaced header, get the object header instead
+++ // if the object header was not pointing to the displaced header,
+++ // we do unlocking via runtime call
+++ if (hdr_offset) {
+++ la(t0, Address(obj, hdr_offset));
+++ cmpxchgptr(disp_hdr, hdr, t0, t1, done, &slow_case);
+++ } else {
+++ cmpxchgptr(disp_hdr, hdr, obj, t1, done, &slow_case);
+++ }
+++ bind(done);
+++}
+++
+++// Defines obj, preserves var_size_in_bytes
+++void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register tmp1, Register tmp2, Label& slow_case) {
+++ if (UseTLAB) {
+++ tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, tmp1, tmp2, slow_case, /* is_far */ true);
+++ } else {
+++ eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, tmp1, slow_case, /* is_far */ true);
+++ }
+++}
+++
+++void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp1, Register tmp2) {
+++ assert_different_registers(obj, klass, len);
+++ if (UseBiasedLocking && !len->is_valid()) {
+++ assert_different_registers(obj, klass, len, tmp1, tmp2);
+++ ld(tmp1, Address(klass, Klass::prototype_header_offset()));
+++ } else {
+++ // This assumes that all prototype 
bits fit in an int32_t
+++ mv(tmp1, (int32_t)(intptr_t)markWord::prototype().value());
+++ }
+++ sd(tmp1, Address(obj, oopDesc::mark_offset_in_bytes()));
+++
+++ if (UseCompressedClassPointers) { // Take care not to kill klass
+++ encode_klass_not_null(tmp1, klass, tmp2);
+++ sw(tmp1, Address(obj, oopDesc::klass_offset_in_bytes()));
+++ } else {
+++ sd(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
+++ }
+++
+++ if (len->is_valid()) {
+++ sw(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
+++ } else if (UseCompressedClassPointers) {
+++ store_klass_gap(obj, zr);
+++ }
+++}
+++
+++// preserves obj, destroys len_in_bytes
+++void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register tmp) {
+++ assert(hdr_size_in_bytes >= 0, "header size must be positive or 0");
+++ Label done;
+++
+++ // len_in_bytes is positive and ptr sized
+++ sub(len_in_bytes, len_in_bytes, hdr_size_in_bytes);
+++ beqz(len_in_bytes, done);
+++
+++ // Preserve obj
+++ if (hdr_size_in_bytes) {
+++ add(obj, obj, hdr_size_in_bytes);
+++ }
+++ zero_memory(obj, len_in_bytes, tmp);
+++ if (hdr_size_in_bytes) {
+++ sub(obj, obj, hdr_size_in_bytes);
+++ }
+++
+++ bind(done);
+++}
+++
+++void C1_MacroAssembler::allocate_object(Register obj, Register tmp1, Register tmp2, int header_size, int object_size, Register klass, Label& slow_case) {
+++ assert_different_registers(obj, tmp1, tmp2);
+++ assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
+++
+++ try_allocate(obj, noreg, object_size * BytesPerWord, tmp1, tmp2, slow_case);
+++
+++ initialize_object(obj, klass, noreg, object_size * HeapWordSize, tmp1, tmp2, UseTLAB);
+++}
+++
+++void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register tmp1, Register tmp2, bool is_tlab_allocated) {
+++ assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
+++ "con_size_in_bytes is not multiple of alignment");
+++ const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
+++
+++ initialize_header(obj, klass, noreg, tmp1, tmp2);
+++
+++ if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
+++ // clear rest of allocated space
+++ const Register index = tmp2;
+++ // 16: multiplier for threshold
+++ const int threshold = 16 * BytesPerWord; // approximate break even point for code size (see comments below)
+++ if (var_size_in_bytes != noreg) {
+++ mv(index, var_size_in_bytes);
+++ initialize_body(obj, index, hdr_size_in_bytes, tmp1);
+++ } else if (con_size_in_bytes <= threshold) {
+++ // use explicit null stores
+++ int i = hdr_size_in_bytes;
+++ if (i < con_size_in_bytes && (con_size_in_bytes % (2 * BytesPerWord))) { // 2: multiplier for BytesPerWord
+++ sd(zr, Address(obj, i));
+++ i += BytesPerWord;
+++ }
+++ for (; i < con_size_in_bytes; i += BytesPerWord) {
+++ sd(zr, Address(obj, i));
+++ }
+++ } else if (con_size_in_bytes > hdr_size_in_bytes) {
+++ block_comment("zero memory");
+++ // use loop to null out the fields
+++ int words = (con_size_in_bytes - hdr_size_in_bytes) / BytesPerWord;
+++ mv(index, words / 8); // 8: stores per unrolled iteration (see unroll below)
+++
+++ const int unroll = 8; // Number of sd(zr) instructions we'll unroll
+++ int remainder = words % unroll;
+++ la(t0, Address(obj, hdr_size_in_bytes + remainder * BytesPerWord));
+++
+++ Label entry_point, loop;
+++ j(entry_point);
+++
+++ bind(loop);
+++ sub(index, index, 1);
+++ for (int i = -unroll; i < 0; i++) {
+++ if (-i == remainder) {
+++ bind(entry_point);
+++ }
+++ 
+++        sd(zr, Address(t0, i * wordSize));
+++      }
+++      if (remainder == 0) {
+++        bind(entry_point);
+++      }
+++      add(t0, t0, unroll * wordSize);
+++      bnez(index, loop);
+++    }
+++  }
+++
+++  membar(MacroAssembler::StoreStore);
+++
+++  if (CURRENT_ENV->dtrace_alloc_probes()) {
+++    assert(obj == x10, "must be");
+++    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
+++  }
+++
+++  verify_oop(obj);
+++}
+++
+++void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int header_size, int f, Register klass, Label& slow_case) {
+++  assert_different_registers(obj, len, tmp1, tmp2, klass);
+++
+++  // determine alignment mask
+++  assert(!(BytesPerWord & 1), "must be multiple of 2 for masking code to work");
+++
+++  // check for negative or excessive length
+++  mv(t0, (int32_t)max_array_allocation_length);
+++  bgeu(len, t0, slow_case, /* is_far */ true);
+++
+++  const Register arr_size = tmp2; // okay to be the same
+++  // align object end
+++  mv(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
+++  shadd(arr_size, len, arr_size, t0, f);
+++  andi(arr_size, arr_size, ~(uint)MinObjAlignmentInBytesMask);
+++
+++  try_allocate(obj, arr_size, 0, tmp1, tmp2, slow_case);
+++
+++  initialize_header(obj, klass, len, tmp1, tmp2);
+++
+++  // clear rest of allocated space
+++  const Register len_zero = len;
+++  initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);
+++
+++  membar(MacroAssembler::StoreStore);
+++
+++  if (CURRENT_ENV->dtrace_alloc_probes()) {
+++    assert(obj == x10, "must be");
+++    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
+++  }
+++
+++  verify_oop(obj);
+++}
+++
+++void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache, Label &L) {
+++  verify_oop(receiver);
+++  // explicit NULL check not needed since load from [klass_offset] causes a trap
+++  // check against inline cache
+++  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
+++  assert_different_registers(receiver, iCache, t0, t2);
+++  cmp_klass(receiver, iCache, t0, t2 /* call-clobbered t2 as a tmp */, L);
+++}
+++
+++void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
+++  assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
+++  // Make sure there is enough stack space for this method's activation.
+++  // Note that we do this before creating a frame.
+++  generate_stack_overflow_check(bang_size_in_bytes);
+++  MacroAssembler::build_frame(framesize);
+++
+++  // Insert nmethod entry barrier into frame.
+++  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+++  bs->nmethod_entry_barrier(this);
+++}
+++
+++void C1_MacroAssembler::remove_frame(int framesize) {
+++  MacroAssembler::remove_frame(framesize);
+++}
+++
+++
+++void C1_MacroAssembler::verified_entry(bool breakAtEntry) {
+++  // If we have to make this method not-entrant we'll overwrite its
+++  // first instruction with a jump. For this action to be legal we
+++  // must ensure that this first instruction is a J, JAL or NOP.
+++  // Make it a NOP.
+++  IncompressibleRegion ir(this); // keep the nop as 4 bytes for patching.
+++  assert_alignment(pc());
+++  nop(); // 4 bytes
+++}
+++
+++void C1_MacroAssembler::load_parameter(int offset_in_words, Register reg) {
+++  // fp + -2: link
+++  //    + -1: return address
+++  //    +  0: argument with offset 0
+++  //    +  1: argument with offset 1
+++  //    +  2: ...
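+++  // (illustration: on RV64 BytesPerWord is 8, so load_parameter(1, reg)
+++  // reads the word at fp + 8, i.e. the argument stored at offset 1)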
+++  ld(reg, Address(fp, offset_in_words * BytesPerWord));
+++}
+++
+++#ifndef PRODUCT
+++
+++void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
+++  if (!VerifyOops) {
+++    return;
+++  }
+++  verify_oop_addr(Address(sp, stack_offset), "oop");
+++}
+++
+++void C1_MacroAssembler::verify_not_null_oop(Register r) {
+++  if (!VerifyOops) return;
+++  Label not_null;
+++  bnez(r, not_null);
+++  stop("non-null oop required");
+++  bind(not_null);
+++  verify_oop(r);
+++}
+++
+++void C1_MacroAssembler::invalidate_registers(bool inv_x10, bool inv_x9, bool inv_x12, bool inv_x13, bool inv_x14, bool inv_x15) {
+++#ifdef ASSERT
+++  static int nn;
+++  if (inv_x10) { mv(x10, 0xDEAD); }
+++  if (inv_x9)  { mv(x9, 0xDEAD); }
+++  if (inv_x12) { mv(x12, nn++); }
+++  if (inv_x13) { mv(x13, 0xDEAD); }
+++  if (inv_x14) { mv(x14, 0xDEAD); }
+++  if (inv_x15) { mv(x15, 0xDEAD); }
+++#endif // ASSERT
+++}
+++#endif // ifndef PRODUCT
+++
+++typedef void (C1_MacroAssembler::*c1_cond_branch_insn)(Register op1, Register op2, Label& label, bool is_far);
+++typedef void (C1_MacroAssembler::*c1_float_cond_branch_insn)(FloatRegister op1, FloatRegister op2,
+++                                                             Label& label, bool is_far, bool is_unordered);
+++
+++static c1_cond_branch_insn c1_cond_branch[] =
+++{
+++  /* SHORT branches */
+++  (c1_cond_branch_insn)&MacroAssembler::beq,
+++  (c1_cond_branch_insn)&MacroAssembler::bne,
+++  (c1_cond_branch_insn)&MacroAssembler::blt,
+++  (c1_cond_branch_insn)&MacroAssembler::ble,
+++  (c1_cond_branch_insn)&MacroAssembler::bge,
+++  (c1_cond_branch_insn)&MacroAssembler::bgt,
+++  (c1_cond_branch_insn)&MacroAssembler::bleu, // lir_cond_belowEqual
+++  (c1_cond_branch_insn)&MacroAssembler::bgeu  // lir_cond_aboveEqual
+++};
+++
+++static c1_float_cond_branch_insn c1_float_cond_branch[] =
+++{
+++  /* FLOAT branches */
+++  (c1_float_cond_branch_insn)&MacroAssembler::float_beq,
+++  (c1_float_cond_branch_insn)&MacroAssembler::float_bne,
+++  (c1_float_cond_branch_insn)&MacroAssembler::float_blt,
+++  (c1_float_cond_branch_insn)&MacroAssembler::float_ble,
+++  (c1_float_cond_branch_insn)&MacroAssembler::float_bge,
+++  (c1_float_cond_branch_insn)&MacroAssembler::float_bgt,
+++  NULL, // lir_cond_belowEqual
+++  NULL, // lir_cond_aboveEqual
+++
+++  /* DOUBLE branches */
+++  (c1_float_cond_branch_insn)&MacroAssembler::double_beq,
+++  (c1_float_cond_branch_insn)&MacroAssembler::double_bne,
+++  (c1_float_cond_branch_insn)&MacroAssembler::double_blt,
+++  (c1_float_cond_branch_insn)&MacroAssembler::double_ble,
+++  (c1_float_cond_branch_insn)&MacroAssembler::double_bge,
+++  (c1_float_cond_branch_insn)&MacroAssembler::double_bgt,
+++  NULL, // lir_cond_belowEqual
+++  NULL  // lir_cond_aboveEqual
+++};
+++
+++void C1_MacroAssembler::c1_cmp_branch(int cmpFlag, Register op1, Register op2, Label& label,
+++                                      BasicType type, bool is_far) {
+++  if (type == T_OBJECT || type == T_ARRAY) {
+++    assert(cmpFlag == lir_cond_equal || cmpFlag == lir_cond_notEqual, "Should be equal or notEqual");
+++    if (cmpFlag == lir_cond_equal) {
+++      beq(op1, op2, label, is_far);
+++    } else {
+++      bne(op1, op2, label, is_far);
+++    }
+++  } else {
+++    assert(cmpFlag >= 0 && cmpFlag < (int)(sizeof(c1_cond_branch) / sizeof(c1_cond_branch[0])),
+++           "invalid c1 conditional branch index");
+++    (this->*c1_cond_branch[cmpFlag])(op1, op2, label, is_far);
+++  }
+++}
+++
+++void C1_MacroAssembler::c1_float_cmp_branch(int cmpFlag, FloatRegister op1, FloatRegister op2, Label& label,
+++                                            bool is_far, bool is_unordered) {
+++  assert(cmpFlag >= 0 &&
+++         cmpFlag < (int)(sizeof(c1_float_cond_branch) / sizeof(c1_float_cond_branch[0])),
+++         "invalid c1 float conditional branch index");
+++  (this->*c1_float_cond_branch[cmpFlag])(op1, op2, label, is_far, is_unordered);
+++}
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp
++@@ -0,0 +1,121 @@
+++/*
+++ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef CPU_RISCV_C1_MACROASSEMBLER_RISCV_HPP
+++#define CPU_RISCV_C1_MACROASSEMBLER_RISCV_HPP
+++
+++using MacroAssembler::build_frame;
+++using MacroAssembler::null_check;
+++
+++// C1_MacroAssembler contains high-level macros for C1
+++
+++ private:
+++  int _rsp_offset; // track rsp changes
+++  // initialization
+++  void pd_init() { _rsp_offset = 0; }
+++
+++
+++ public:
+++  void try_allocate(
+++    Register obj,               // result: pointer to object after successful allocation
+++    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
+++    int con_size_in_bytes,      // object size in bytes if known at compile time
+++    Register tmp1,              // temp register
+++    Register tmp2,              // temp register
+++    Label& slow_case            // continuation point if fast allocation fails
+++  );
+++
+++  void initialize_header(Register obj, Register klass, Register len, Register tmp1, Register tmp2);
+++  void initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register tmp);
+++
+++  void float_cmp(bool is_float, int unordered_result,
+++                 FloatRegister f0, FloatRegister f1,
+++                 Register result);
+++
+++  // locking
+++  // hdr     : must be x10, contents destroyed
+++  // obj     : must point to the object to lock, contents preserved
+++  // disp_hdr: must point to the displaced header location, contents preserved
+++  // tmp     : temporary register, contents destroyed
+++  // returns code offset at which to add null check debug information
+++  int lock_object(Register swap, Register obj, Register disp_hdr, Register tmp, Label& slow_case);
+++
+++  // unlocking
+++  // hdr     : contents destroyed
+++  // obj     : must point to the object to unlock, contents preserved
+++  // disp_hdr: must be x10 & must point to the displaced header location, contents destroyed
+++  void unlock_object(Register swap, Register obj, Register lock, Label& slow_case);
+++
+++  void initialize_object(
+++    Register obj,               // result: pointer to object after successful allocation
+++    Register klass,             // object klass
+++    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
+++    int con_size_in_bytes,      // object size in bytes if known at compile time
+++    Register tmp1,              // temp register
+++    Register tmp2,              // temp register
+++    bool is_tlab_allocated      // the object was allocated in a TLAB; relevant for the implementation of ZeroTLAB
+++  );
+++
+++  // allocation of fixed-size objects
+++  // (can also be used to allocate fixed-size arrays, by setting
+++  // hdr_size correctly and storing the array length afterwards)
+++  // obj        : will contain pointer to allocated object
+++  // tmp1, tmp2 : temp registers - contents destroyed
+++  // header_size: size of object header in words
+++  // object_size: total size of object in words
+++  // slow_case  : exit to slow case implementation if fast allocation fails
+++  void allocate_object(Register obj, Register tmp1, Register tmp2, int header_size, int object_size, Register klass, Label& slow_case);
+++
+++  enum {
+++    max_array_allocation_length = 0x00FFFFFF
+++  };
+++
+++  // allocation of arrays
+++  // obj        : will contain pointer to allocated object
+++  // len        : array length in number of elements
+++  // tmp1, tmp2 : temp registers - contents destroyed
+++  // header_size: size of object header in words
+++  // f          : element scale factor
+++  // slow_case  : exit to slow case implementation if fast allocation fails
+++  void allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int header_size, int f, Register klass, Label& slow_case);
+++
+++  int rsp_offset() const { return _rsp_offset; }
+++
+++  void invalidate_registers(bool inv_x10, bool inv_x9, bool inv_x12, bool inv_x13, bool inv_x14, bool inv_x15) PRODUCT_RETURN;
+++
+++  // This platform only uses signal-based null checks. The Label is not needed.
+++  void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
+++
+++  void load_parameter(int offset_in_words, Register reg);
+++
+++  void inline_cache_check(Register receiver, Register iCache, Label &L);
+++
+++  static const int c1_double_branch_mask = 1 << 3; // depends on c1_float_cond_branch
+++  void c1_cmp_branch(int cmpFlag, Register op1, Register op2, Label& label, BasicType type, bool is_far);
+++  void c1_float_cmp_branch(int cmpFlag, FloatRegister op1, FloatRegister op2, Label& label,
+++                           bool is_far, bool is_unordered = false);
+++
+++#endif // CPU_RISCV_C1_MACROASSEMBLER_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp
++@@ -0,0 +1,1179 @@
+++/*
+++ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#include "precompiled.hpp"
+++#include "asm/assembler.hpp"
+++#include "c1/c1_CodeStubs.hpp"
+++#include "c1/c1_Defs.hpp"
+++#include "c1/c1_MacroAssembler.hpp"
+++#include "c1/c1_Runtime1.hpp"
+++#include "compiler/disassembler.hpp"
+++#include "compiler/oopMap.hpp"
+++#include "gc/shared/cardTable.hpp"
+++#include "gc/shared/cardTableBarrierSet.hpp"
+++#include "gc/shared/collectedHeap.hpp"
+++#include "interpreter/interpreter.hpp"
+++#include "memory/universe.hpp"
+++#include "nativeInst_riscv.hpp"
+++#include "oops/compiledICHolder.hpp"
+++#include "oops/oop.inline.hpp"
+++#include "prims/jvmtiExport.hpp"
+++#include "register_riscv.hpp"
+++#include "runtime/sharedRuntime.hpp"
+++#include "runtime/signature.hpp"
+++#include "runtime/stubRoutines.hpp"
+++#include "runtime/vframe.hpp"
+++#include "runtime/vframeArray.hpp"
+++#include "utilities/powerOfTwo.hpp"
+++#include "vmreg_riscv.inline.hpp"
+++
+++
+++// Implementation of StubAssembler
+++
+++int StubAssembler::call_RT(Register oop_result, Register metadata_result, address entry, int args_size) {
+++  // setup registers
+++  assert(!(oop_result->is_valid() || metadata_result->is_valid()) || oop_result != metadata_result,
+++         "registers must be different");
+++  assert(oop_result != xthread && metadata_result != xthread, "registers must be different");
+++  assert(args_size >= 0, "illegal args_size");
+++  bool align_stack = false;
+++
+++  mv(c_rarg0, xthread);
+++  set_num_rt_args(0); // Nothing on stack
+++
+++  Label retaddr;
+++  set_last_Java_frame(sp, fp, retaddr, t0);
+++
+++  // do the call
+++  RuntimeAddress target(entry);
+++  relocate(target.rspec(), [&] {
+++    int32_t offset;
+++    la_patchable(t0, target, offset);
+++    jalr(x1, t0, offset);
+++  });
+++  bind(retaddr);
+++  int call_offset = offset();
+++  // verify callee-saved register
+++#ifdef ASSERT
+++  push_reg(x10, sp);
+++  { Label L;
+++    get_thread(x10);
+++    beq(xthread, x10, L);
+++    stop("StubAssembler::call_RT: xthread not callee saved?");
+++    bind(L);
+++  }
+++  pop_reg(x10, sp);
+++#endif
+++  reset_last_Java_frame(true);
+++
+++  // check for pending exceptions
+++  { Label L;
+++    // check for pending exceptions (java_thread is set upon return)
+++    ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
+++    beqz(t0, L);
+++    // exception pending => remove activation and forward to exception handler
+++    // make sure that the vm_results are cleared
+++    if (oop_result->is_valid()) {
+++      sd(zr, Address(xthread, JavaThread::vm_result_offset()));
+++    }
+++    if (metadata_result->is_valid()) {
+++      sd(zr, Address(xthread, JavaThread::vm_result_2_offset()));
+++    }
+++    if (frame_size() == no_frame_size) {
+++      leave();
+++      far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+++    } else if (_stub_id == Runtime1::forward_exception_id) {
+++      should_not_reach_here();
+++    } else {
+++      far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
+++    }
+++    bind(L);
+++  }
+++  // get oop results if there are any and reset the values in the thread
+++  if (oop_result->is_valid()) {
+++    get_vm_result(oop_result, xthread);
+++  }
+++  if (metadata_result->is_valid()) {
+++    get_vm_result_2(metadata_result, xthread);
+++  }
+++  return call_offset;
+++}
+++
+++int StubAssembler::call_RT(Register oop_result, Register metadata_result, address entry, Register arg1) {
+++  mv(c_rarg1, arg1);
+++  return call_RT(oop_result, metadata_result, entry, 1);
+++}
+++
+++int StubAssembler::call_RT(Register oop_result, Register metadata_result, address entry, Register arg1, Register arg2) {
+++  const int arg_num = 2;
+++  if (c_rarg1 == arg2) {
+++    if (c_rarg2 == arg1) {
+++      // both arguments already sit in each other's target register:
+++      // swap them in place with the XOR trick, no scratch register needed
+++      xorr(arg1, arg1, arg2);
+++      xorr(arg2, arg1, arg2);
+++      xorr(arg1, arg1, arg2);
+++    } else {
+++      mv(c_rarg2, arg2);
+++      mv(c_rarg1, arg1);
+++    }
+++  } else {
+++    mv(c_rarg1, arg1);
+++    mv(c_rarg2, arg2);
+++  }
+++  return call_RT(oop_result, metadata_result, entry, arg_num);
+++}
+++
+++int StubAssembler::call_RT(Register oop_result, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
+++  const int arg_num = 3;
+++  // if there is any conflict use the stack
+++  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
+++      arg2 == c_rarg1 || arg2 == c_rarg3 ||
+++      arg3 == c_rarg1 || arg3 == c_rarg2) {
+++    const int arg1_sp_offset = 0;
+++    const int arg2_sp_offset = 1;
+++    const int arg3_sp_offset = 2;
+++    addi(sp, sp, -(arg_num + 1) * wordSize);
+++    sd(arg1, Address(sp, arg1_sp_offset * wordSize));
+++    sd(arg2, Address(sp, arg2_sp_offset * wordSize));
+++    sd(arg3, Address(sp, arg3_sp_offset * wordSize));
+++
+++    ld(c_rarg1, Address(sp, arg1_sp_offset * wordSize));
+++    ld(c_rarg2, Address(sp, arg2_sp_offset * wordSize));
+++    ld(c_rarg3, Address(sp, arg3_sp_offset * wordSize));
+++    addi(sp, sp, (arg_num + 1) * wordSize);
+++  } else {
+++    mv(c_rarg1, arg1);
+++    mv(c_rarg2, arg2);
+++    mv(c_rarg3, arg3);
+++  }
+++  return call_RT(oop_result, metadata_result, entry, arg_num);
+++}
+++
+++enum return_state_t {
+++  does_not_return, requires_return
+++};
+++
+++// Implementation of StubFrame
+++
+++class StubFrame: public StackObj {
+++ private:
+++  StubAssembler* _sasm;
+++  bool _return_state;
+++
+++ public:
+++  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments, return_state_t return_state=requires_return);
+++  void load_argument(int offset_in_words, Register reg);
+++
+++  ~StubFrame();
+++};
+++
+++void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
+++  set_info(name, must_gc_arguments);
+++  enter();
+++}
+++
+++void StubAssembler::epilogue() {
+++  leave();
+++  ret();
+++}
+++
+++#define __ _sasm->
+++
+++StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments, return_state_t return_state) {
+++  _sasm = sasm;
+++  _return_state = return_state;
+++  __ prologue(name, must_gc_arguments);
+++}
+++
+++// load parameters that were stored with LIR_Assembler::store_parameter
+++// Note: offsets for store_parameter and load_argument must match
+++void StubFrame::load_argument(int offset_in_words, Register reg) {
+++  __ load_parameter(offset_in_words, reg);
+++}
+++
+++
+++StubFrame::~StubFrame() {
+++  if (_return_state == requires_return) {
+++    __ epilogue();
+++  } else {
+++    __ should_not_reach_here();
+++  }
+++  _sasm = NULL;
+++}
+++
+++#undef __
+++
+++
+++// Implementation of Runtime1
+++
+++#define __ sasm->
+++
+++const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
+++
+++// Stack layout for saving/restoring all the registers needed during a runtime
+++// call (this includes deoptimization)
+++// Note that users of this frame may well have arguments to some runtime
+++// while these values are on the stack. These positions neglect those arguments
+++// but the code in save_live_registers will take the argument count into
+++// account.
+++//
+++
+++enum reg_save_layout {
+++  reg_save_frame_size = 32 /* float */ + 30 /* integer excluding x3, x4 */
+++};
+++
+++// Save off registers which might be killed by calls into the runtime.
+++// Tries to be smart about FPU registers. In particular we separate
+++// saving and describing the FPU registers for deoptimization since we
+++// have to save the FPU registers twice if we describe them. The
+++// deopt blob is the only thing which needs to describe FPU registers.
+++// In all other cases it should be sufficient to simply save their
+++// current value.
+++
+++static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
+++static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
+++
+++static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
+++  int frame_size_in_bytes = reg_save_frame_size * BytesPerWord;
+++  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
+++  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
+++  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
+++  assert_cond(oop_map != NULL);
+++
+++  // caller save registers only, see FrameMap::initialize
+++  // in c1_FrameMap_riscv.cpp for detail.
+++  const static Register caller_save_cpu_regs[FrameMap::max_nof_caller_save_cpu_regs] = {
+++    x7, x10, x11, x12, x13, x14, x15, x16, x17, x28, x29, x30, x31
+++  };
+++
+++  for (int i = 0; i < FrameMap::max_nof_caller_save_cpu_regs; i++) {
+++    Register r = caller_save_cpu_regs[i];
+++    int sp_offset = cpu_reg_save_offsets[r->encoding()];
+++    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
+++                              r->as_VMReg());
+++  }
+++
+++  // fpu_regs
+++  if (save_fpu_registers) {
+++    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
+++      FloatRegister r = as_FloatRegister(i);
+++      int sp_offset = fpu_reg_save_offsets[i];
+++      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
+++                                r->as_VMReg());
+++    }
+++  }
+++  return oop_map;
+++}
+++
+++static OopMap* save_live_registers(StubAssembler* sasm,
+++                                   bool save_fpu_registers = true) {
+++  __ block_comment("save_live_registers");
+++
+++  // if the number of pushed regs is odd, one slot will be reserved for alignment
+++  __ push_reg(RegSet::range(x5, x31), sp); // integer registers except ra(x1) & sp(x2) & gp(x3) & tp(x4)
+++
+++  if (save_fpu_registers) {
+++    // float registers
+++    __ addi(sp, sp, -(FrameMap::nof_fpu_regs * wordSize));
+++    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
+++      __ fsd(as_FloatRegister(i), Address(sp, i * wordSize));
+++    }
+++  } else {
+++    // we define reg_save_frame_size = 62 as the fixed frame size,
+++    // so we should also subtract 32 * wordSize from sp when save_fpu_registers == false
+++    __ addi(sp, sp, -32 * wordSize);
+++  }
+++
+++  return generate_oop_map(sasm, save_fpu_registers);
+++}
+++
+++static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
+++  if (restore_fpu_registers) {
+++    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
+++      __ fld(as_FloatRegister(i), Address(sp, i * wordSize));
+++    }
+++    __ addi(sp, sp, FrameMap::nof_fpu_regs * wordSize);
+++  } else {
+++    // we define reg_save_frame_size = 62 as the fixed frame size,
+++    // so we should also add 32 * wordSize to sp when save_fpu_registers == false
+++    __ addi(sp, sp, 32 * wordSize);
+++  }
+++
+++  // if the number of popped regs is odd, the reserved slot for alignment will be removed
+++  __ pop_reg(RegSet::range(x5, x31), sp); // integer registers except ra(x1) & sp(x2) & gp(x3) & tp(x4)
+++}
+++
+++static void restore_live_registers_except_r10(StubAssembler* sasm, bool restore_fpu_registers = true) {
+++  if (restore_fpu_registers) {
+++    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
+++      __ fld(as_FloatRegister(i), Address(sp, i * wordSize));
+++    }
+++    __ addi(sp, sp, FrameMap::nof_fpu_regs * wordSize);
+++  } else {
+++    // we define reg_save_frame_size = 62 as the fixed frame size,
+++    // so we should also add 32 * wordSize to sp when save_fpu_registers == false
+++    __ addi(sp, sp, 32 * wordSize);
+++  }
+++
+++  // pop integer registers except ra(x1) & sp(x2) & gp(x3) & tp(x4) & x10
+++  // there is one reserved slot for alignment on the stack in save_live_registers().
+++  __ pop_reg(RegSet::range(x5, x9), sp);   // pop x5 ~ x9 with the reserved slot for alignment
+++  __ pop_reg(RegSet::range(x11, x31), sp); // pop x11 ~ x31; x10 will be automatically skipped here
+++}
+++
+++void Runtime1::initialize_pd() {
+++  int i = 0;
+++  int sp_offset = 0;
+++  const int step = 2; // SP offsets are in halfwords
+++
+++  // all float registers are saved explicitly
+++  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
+++    fpu_reg_save_offsets[i] = sp_offset;
+++    sp_offset += step;
+++  }
+++
+++  // a slot reserved for stack 16-byte alignment, see MacroAssembler::push_reg
+++  sp_offset += step;
+++  // we save x5 ~ x31, except x0 ~ x4: loop starts from x5
+++  for (i = 5; i < FrameMap::nof_cpu_regs; i++) {
+++    cpu_reg_save_offsets[i] = sp_offset;
+++    sp_offset += step;
+++  }
+++}
+++
+++// target: the entry point of the method that creates and posts the exception oop
+++// has_argument: true if the exception needs arguments (passed in t0 and t1)
+++
+++OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
+++  // make a frame and preserve the caller's caller-save registers
+++  OopMap* oop_map = save_live_registers(sasm);
+++  assert_cond(oop_map != NULL);
+++  int call_offset = 0;
+++  if (!has_argument) {
+++    call_offset = __ call_RT(noreg, noreg, target);
+++  } else {
+++    __ mv(c_rarg1, t0);
+++    __ mv(c_rarg2, t1);
+++    call_offset = __ call_RT(noreg, noreg, target);
+++  }
+++  OopMapSet* oop_maps = new OopMapSet();
+++  assert_cond(oop_maps != NULL);
+++  oop_maps->add_gc_map(call_offset, oop_map);
+++
+++  return oop_maps;
+++}
+++
+++OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
+++  __ block_comment("generate_handle_exception");
+++
+++  // incoming parameters
+++  const Register exception_oop = x10;
+++  const Register exception_pc  = x13;
+++
+++  OopMapSet* oop_maps = new OopMapSet();
+++  assert_cond(oop_maps != NULL);
+++  OopMap* oop_map = NULL;
+++
+++  switch (id) {
+++    case forward_exception_id:
+++      // We're handling an exception in the context of a compiled frame.
+++      // The registers have been saved in the standard places. Perform
+++      // an exception lookup in the caller and dispatch to the handler
+++      // if found. Otherwise unwind and dispatch to the caller's
+++      // exception handler.
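+++      // (the pending exception is still stored in the thread at this point;
+++      // the code below moves it into x10 and the issuing pc into x13)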
+++      oop_map = generate_oop_map(sasm, 1 /* save_fpu_registers */);
+++
+++      // load and clear pending exception oop into x10
+++      __ ld(exception_oop, Address(xthread, Thread::pending_exception_offset()));
+++      __ sd(zr, Address(xthread, Thread::pending_exception_offset()));
+++
+++      // load issuing PC (the return address for this stub) into x13
+++      __ ld(exception_pc, Address(fp, frame::return_addr_offset * BytesPerWord));
+++
+++      // make sure that the vm_results are cleared (may be unnecessary)
+++      __ sd(zr, Address(xthread, JavaThread::vm_result_offset()));
+++      __ sd(zr, Address(xthread, JavaThread::vm_result_2_offset()));
+++      break;
+++    case handle_exception_nofpu_id:
+++    case handle_exception_id:
+++      // At this point all registers MAY be live.
+++      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
+++      break;
+++    case handle_exception_from_callee_id: {
+++      // At this point all registers except exception oop (x10) and
+++      // exception pc (ra) are dead.
+++      const int frame_size = 2 /* fp, return address */;
+++      oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
+++      sasm->set_frame_size(frame_size);
+++      break;
+++    }
+++    default: ShouldNotReachHere();
+++  }
+++
+++  // verify that only x10 and x13 are valid at this time
+++  __ invalidate_registers(false, true, true, false, true, true);
+++  // verify that x10 contains a valid exception
+++  __ verify_not_null_oop(exception_oop);
+++
+++#ifdef ASSERT
+++  // check that fields in JavaThread for exception oop and issuing pc are
+++  // empty before writing to them
+++  Label oop_empty;
+++  __ ld(t0, Address(xthread, JavaThread::exception_oop_offset()));
+++  __ beqz(t0, oop_empty);
+++  __ stop("exception oop already set");
+++  __ bind(oop_empty);
+++
+++  Label pc_empty;
+++  __ ld(t0, Address(xthread, JavaThread::exception_pc_offset()));
+++  __ beqz(t0, pc_empty);
+++  __ stop("exception pc already set");
+++  __ bind(pc_empty);
+++#endif
+++
+++  // save exception oop and issuing pc into JavaThread
+++  // (exception handler will load it from here)
+++  __ sd(exception_oop, Address(xthread, JavaThread::exception_oop_offset()));
+++  __ sd(exception_pc, Address(xthread, JavaThread::exception_pc_offset()));
+++
+++  // patch throwing pc into return address (has bci & oop map)
+++  __ sd(exception_pc, Address(fp, frame::return_addr_offset * BytesPerWord));
+++
+++  // compute the exception handler.
+++  // the exception oop and the throwing pc are read from the fields in JavaThread
+++  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
+++  guarantee(oop_map != NULL, "NULL oop_map!");
+++  oop_maps->add_gc_map(call_offset, oop_map);
+++
+++  // x10: handler address
+++  //      will be the deopt blob if the nmethod was deoptimized while we looked up
+++  //      the handler, regardless of whether a handler existed in the nmethod.
+++
+++  // only x10 is valid at this time, all other registers have been destroyed by the runtime call
+++  __ invalidate_registers(false, true, true, true, true, true);
+++
+++  // patch the return address; this stub will directly return to the exception handler
+++  __ sd(x10, Address(fp, frame::return_addr_offset * BytesPerWord));
+++
+++  switch (id) {
+++    case forward_exception_id:
+++    case handle_exception_nofpu_id:
+++    case handle_exception_id:
+++      // Restore the registers that were saved at the beginning.
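+++      // (for handle_exception_nofpu_id only the integer registers were
+++      // saved above, so only those are restored here)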
+++      restore_live_registers(sasm, id != handle_exception_nofpu_id);
+++      break;
+++    case handle_exception_from_callee_id:
+++      break;
+++    default: ShouldNotReachHere();
+++  }
+++
+++  return oop_maps;
+++}
+++
+++
+++void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
+++  // incoming parameters
+++  const Register exception_oop = x10;
+++  // other registers used in this stub
+++  const Register handler_addr = x11;
+++
+++  // verify that only x10 is valid at this time
+++  __ invalidate_registers(false, true, true, true, true, true);
+++
+++#ifdef ASSERT
+++  // check that fields in JavaThread for exception oop and issuing pc are empty
+++  Label oop_empty;
+++  __ ld(t0, Address(xthread, JavaThread::exception_oop_offset()));
+++  __ beqz(t0, oop_empty);
+++  __ stop("exception oop must be empty");
+++  __ bind(oop_empty);
+++
+++  Label pc_empty;
+++  __ ld(t0, Address(xthread, JavaThread::exception_pc_offset()));
+++  __ beqz(t0, pc_empty);
+++  __ stop("exception pc must be empty");
+++  __ bind(pc_empty);
+++#endif
+++
+++  // Save our return address because
+++  // exception_handler_for_return_address will destroy it. We also
+++  // save exception_oop.
+++  __ addi(sp, sp, -2 * wordSize);
+++  __ sd(exception_oop, Address(sp, wordSize));
+++  __ sd(ra, Address(sp));
+++
+++  // search the exception handler address of the caller (using the return address)
+++  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), xthread, ra);
+++  // x10: exception handler address of the caller
+++
+++  // Only x10 is valid at this time; all other registers have been
+++  // destroyed by the call.
+++  __ invalidate_registers(false, true, true, true, false, true);
+++
+++  // move result of call into correct register
+++  __ mv(handler_addr, x10);
+++
+++  // get throwing pc (= return address).
+++  // ra has been destroyed by the call
+++  __ ld(ra, Address(sp));
+++  __ ld(exception_oop, Address(sp, wordSize));
+++  __ addi(sp, sp, 2 * wordSize);
+++  __ mv(x13, ra);
+++
+++  __ verify_not_null_oop(exception_oop);
+++
+++  // continue at exception handler (return address removed)
+++  // note: do *not* remove arguments when unwinding the
+++  //       activation since the caller assumes having
+++  //       all arguments on the stack when entering the
+++  //       runtime to determine the exception handler
+++  //       (GC happens at call site with arguments!)
+++  // x10: exception oop
+++  // x13: throwing pc
+++  // x11: exception handler
+++  __ jr(handler_addr);
+++}
+++
+++OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
+++  // use the maximum number of runtime-arguments here because it is difficult to
+++  // distinguish each RT-Call.
+++  // Note: This number also affects the RT-Call in generate_handle_exception because
+++  //       the oop-map is shared for all calls.
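+++  // (accordingly, save_live_registers() below saves the full caller-save
+++  // register set, so the one shared oop map describes every such call)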
+++  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
+++  assert(deopt_blob != NULL, "deoptimization blob must have been created");
+++
+++  OopMap* oop_map = save_live_registers(sasm);
+++  assert_cond(oop_map != NULL);
+++
+++  __ mv(c_rarg0, xthread);
+++  Label retaddr;
+++  __ set_last_Java_frame(sp, fp, retaddr, t0);
+++  // do the call
+++  RuntimeAddress addr(target);
+++  __ relocate(addr.rspec(), [&] {
+++    int32_t offset;
+++    __ la_patchable(t0, addr, offset);
+++    __ jalr(x1, t0, offset);
+++  });
+++  __ bind(retaddr);
+++  OopMapSet* oop_maps = new OopMapSet();
+++  assert_cond(oop_maps != NULL);
+++  oop_maps->add_gc_map(__ offset(), oop_map);
+++  // verify callee-saved register
+++#ifdef ASSERT
+++  { Label L;
+++    __ get_thread(t0);
+++    __ beq(xthread, t0, L);
+++    __ stop("StubAssembler::call_RT: xthread not callee saved?");
+++    __ bind(L);
+++  }
+++#endif
+++  __ reset_last_Java_frame(true);
+++
+++#ifdef ASSERT
+++  // Check that fields in JavaThread for exception oop and issuing pc are empty
+++  Label oop_empty;
+++  __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
+++  __ beqz(t0, oop_empty);
+++  __ stop("exception oop must be empty");
+++  __ bind(oop_empty);
+++
+++  Label pc_empty;
+++  __ ld(t0, Address(xthread, JavaThread::exception_pc_offset()));
+++  __ beqz(t0, pc_empty);
+++  __ stop("exception pc must be empty");
+++  __ bind(pc_empty);
+++#endif
+++
+++  // Runtime will return true if the nmethod has been deoptimized; this is the
+++  // expected scenario, and anything else is an error. Note that we maintain a
+++  // check on the result purely as a defensive measure.
+++  Label no_deopt;
+++  __ beqz(x10, no_deopt); // Have we deoptimized?
+++
+++  // Perform a re-execute. The proper return address is already on the stack;
+++  // we just need to restore registers, pop all of our frames but the return
+++  // address and jump to the deopt blob.
+++
+++  restore_live_registers(sasm);
+++  __ leave();
+++  __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
+++
+++  __ bind(no_deopt);
+++  __ stop("deopt not performed");
+++
+++  return oop_maps;
+++}
+++
+++OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
+++  // for better readability
+++  const bool dont_gc_arguments = false;
+++
+++  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
+++  bool save_fpu_registers = true;
+++
+++  // stub code & info for the different stubs
+++  OopMapSet* oop_maps = NULL;
+++  switch (id) {
+++    {
+++    case forward_exception_id:
+++      {
+++        oop_maps = generate_handle_exception(id, sasm);
+++        __ leave();
+++        __ ret();
+++      }
+++      break;
+++
+++    case throw_div0_exception_id:
+++      {
+++        StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return);
+++        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
+++      }
+++      break;
+++
+++    case throw_null_pointer_exception_id:
+++      {
+++        StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return);
+++        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
+++      }
+++      break;
+++
+++    case new_instance_id:
+++    case fast_new_instance_id:
+++    case fast_new_instance_init_check_id:
+++      {
+++        Register klass = x13; // Incoming
+++        Register obj   = x10; // Result
+++
+++        if (id == new_instance_id) {
+++          __ set_info("new_instance", dont_gc_arguments);
+++        } else if (id == fast_new_instance_id) {
+++          __ set_info("fast new_instance", dont_gc_arguments);
+++        } else {
+++          assert(id == fast_new_instance_init_check_id, "bad StubID");
+++          __ set_info("fast new_instance init check", dont_gc_arguments);
+++        }
+++
+++        // If TLAB is disabled, see if there is support for inlining contiguous
+++        // allocations.
+++        // Otherwise, just go to the slow path.
+++        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
+++            !UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
+++          Label slow_path;
+++          Register obj_size = x12;
+++          Register tmp1     = x9;
+++          Register tmp2     = x14;
+++          assert_different_registers(klass, obj, obj_size, tmp1, tmp2);
+++
+++          const int sp_offset = 2;
+++          const int x9_offset = 1;
+++          const int zr_offset = 0;
+++          __ addi(sp, sp, -(sp_offset * wordSize));
+++          __ sd(x9, Address(sp, x9_offset * wordSize));
+++          __ sd(zr, Address(sp, zr_offset * wordSize));
+++
+++          if (id == fast_new_instance_init_check_id) {
+++            // make sure the klass is initialized
+++            __ lbu(t0, Address(klass, InstanceKlass::init_state_offset()));
+++            __ mv(t1, InstanceKlass::fully_initialized);
+++            __ bne(t0, t1, slow_path);
+++          }
+++
+++#ifdef ASSERT
+++          // assert object can be fast path allocated
+++          {
+++            Label ok, not_ok;
+++            __ lw(obj_size, Address(klass, Klass::layout_helper_offset()));
+++            // make sure it's an instance. For instances, layout helper is a positive number.
+++            // For arrays, layout helper is a negative number
+++            __ blez(obj_size, not_ok);
+++            __ andi(t0, obj_size, Klass::_lh_instance_slow_path_bit);
+++            __ beqz(t0, ok);
+++            __ bind(not_ok);
+++            __ stop("assert(can be fast path allocated)");
+++            __ should_not_reach_here();
+++            __ bind(ok);
+++          }
+++#endif // ASSERT
+++
+++          // get the instance size
+++          __ lwu(obj_size, Address(klass, Klass::layout_helper_offset()));
+++
+++          __ eden_allocate(obj, obj_size, 0, tmp1, slow_path);
+++
+++          __ initialize_object(obj, klass, obj_size, 0, tmp1, tmp2, /* is_tlab_allocated */ false);
+++          __ verify_oop(obj);
+++          __ ld(x9, Address(sp, x9_offset * wordSize));
+++          __ ld(zr, Address(sp, zr_offset * wordSize));
+++          __ addi(sp, sp, sp_offset * wordSize);
+++          __ ret();
+++
+++          __ bind(slow_path);
+++          __ ld(x9, Address(sp, x9_offset * wordSize));
+++          __ ld(zr, Address(sp, zr_offset * wordSize));
+++          __ addi(sp, sp, sp_offset * wordSize);
+++        }
+++
+++        __ enter();
+++        OopMap* map = save_live_registers(sasm);
+++        assert_cond(map != NULL);
+++        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
+++        oop_maps = new OopMapSet();
+++        assert_cond(oop_maps != NULL);
+++        oop_maps->add_gc_map(call_offset, map);
+++        restore_live_registers_except_r10(sasm);
+++        __ verify_oop(obj);
+++        __ leave();
+++        __ ret();
+++
+++        // x10: new instance
+++      }
+++
+++      break;
+++
+++    case counter_overflow_id:
+++      {
+++        Register bci = x10;
+++        Register method = x11;
+++        __ enter();
+++        OopMap* map = save_live_registers(sasm);
+++        assert_cond(map != NULL);
+++
+++        const int bci_off = 0;
+++        const int method_off = 1;
+++        // Retrieve bci
+++        __ lw(bci, Address(fp, bci_off * BytesPerWord));
+++        // And a pointer to the Method*
+++        __ ld(method, Address(fp, method_off * BytesPerWord));
+++        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
+++        oop_maps = new OopMapSet();
+++        assert_cond(oop_maps != NULL);
+++        oop_maps->add_gc_map(call_offset, map);
+++        restore_live_registers(sasm);
+++        __ leave();
+++        __ ret();
+++      }
+++      break;
+++
+++    case new_type_array_id:
+++    case new_object_array_id:
+++      {
+++        Register length = x9;  // Incoming
+++        Register klass  = x13; // Incoming
+++        Register obj    = x10; // Result
+++
+++        if (id == new_type_array_id) {
+++          __ set_info("new_type_array", dont_gc_arguments);
+++        } else {
+++          __ set_info("new_object_array", dont_gc_arguments);
+++        }
+++
+++#ifdef ASSERT
+++        // assert object type is really an array of the proper kind
+++        {
+++          Label ok;
+++          Register tmp = obj;
+++          __ lwu(tmp, Address(klass, Klass::layout_helper_offset()));
+++          __ sraiw(tmp, tmp, Klass::_lh_array_tag_shift);
+++          int tag = ((id == new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value);
+++          __ mv(t0, tag);
+++          __ beq(t0, tmp, ok);
+++          __ stop("assert(is an array klass)");
+++          __ should_not_reach_here();
+++          __ bind(ok);
+++        }
+++#endif // ASSERT
+++
+++        // If TLAB is disabled, see if there is support for inlining contiguous
+++        // allocations.
+++        // Otherwise, just go to the slow path.
+++        if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
+++          Register arr_size = x14;
+++          Register tmp1     = x12;
+++          Register tmp2     = x15;
+++          Label slow_path;
+++          assert_different_registers(length, klass, obj, arr_size, tmp1, tmp2);
+++
+++          // check that array length is small enough for fast path.
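+++          // (bgtu below is an unsigned compare, so a negative length, viewed
+++          // as a huge unsigned value, also takes the slow path)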
+++          __ mv(t0, C1_MacroAssembler::max_array_allocation_length);
+++          __ bgtu(length, t0, slow_path);
+++
+++          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
+++          __ lwu(tmp1, Address(klass, Klass::layout_helper_offset()));
+++          __ andi(t0, tmp1, 0x1f);
+++          __ sll(arr_size, length, t0);
+++          int lh_header_size_width = exact_log2(Klass::_lh_header_size_mask + 1);
+++          int lh_header_size_msb = Klass::_lh_header_size_shift + lh_header_size_width;
+++          __ slli(tmp1, tmp1, XLEN - lh_header_size_msb);
+++          __ srli(tmp1, tmp1, XLEN - lh_header_size_width);
+++          __ add(arr_size, arr_size, tmp1);
+++          __ addi(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
+++          __ andi(arr_size, arr_size, ~(uint)MinObjAlignmentInBytesMask);
+++
+++          __ eden_allocate(obj, arr_size, 0, tmp1, slow_path); // preserves arr_size
+++
+++          __ initialize_header(obj, klass, length, tmp1, tmp2);
+++          __ lbu(tmp1, Address(klass,
+++                               in_bytes(Klass::layout_helper_offset()) +
+++                               (Klass::_lh_header_size_shift / BitsPerByte)));
+++          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
+++          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
+++          __ andi(tmp1, tmp1, Klass::_lh_header_size_mask);
+++          __ sub(arr_size, arr_size, tmp1); // body length
+++          __ add(tmp1, tmp1, obj);          // body start
+++          __ initialize_body(tmp1, arr_size, 0, tmp2);
+++          __ membar(MacroAssembler::StoreStore);
+++          __ verify_oop(obj);
+++
+++          __ ret();
+++
+++          __ bind(slow_path);
+++        }
+++
+++        __ enter();
+++        OopMap* map = save_live_registers(sasm);
+++        assert_cond(map != NULL);
+++        int call_offset = 0;
+++        if (id == new_type_array_id) {
+++          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
+++        } else {
+++          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
+++        }
+++
+++        oop_maps = new OopMapSet();
+++        assert_cond(oop_maps != NULL);
+++        oop_maps->add_gc_map(call_offset, map);
+++        restore_live_registers_except_r10(sasm);
+++
+++        __ verify_oop(obj);
+++        __ leave();
+++        __ ret();
+++
+++        // x10: new array
+++      }
+++      break;
+++
+++    case new_multi_array_id:
+++      {
+++        StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
+++        // x10: klass
+++        // x9: rank
+++        // x12: address of 1st dimension
+++        OopMap* map = save_live_registers(sasm);
+++        assert_cond(map != NULL);
+++        __ mv(c_rarg1, x10);
+++        __ mv(c_rarg3, x12);
+++        __ mv(c_rarg2, x9);
+++        int call_offset = __ call_RT(x10, noreg, CAST_FROM_FN_PTR(address, new_multi_array), x11, x12, x13);
+++
+++        oop_maps = new OopMapSet();
+++        assert_cond(oop_maps != NULL);
+++        oop_maps->add_gc_map(call_offset, map);
+++        restore_live_registers_except_r10(sasm);
+++
+++        // x10: new multi array
+++        __ verify_oop(x10);
+++      }
+++      break;
+++
+++    case register_finalizer_id:
+++      {
+++        __ set_info("register_finalizer", dont_gc_arguments);
+++
+++        // This is called via call_runtime so the arguments
+++        // will be placed in C ABI locations
+++        __ verify_oop(c_rarg0);
+++
+++        // load the klass and check the has-finalizer flag
+++        Label register_finalizer;
+++        Register t = x15;
+++        __ load_klass(t, x10);
+++        __ lwu(t, Address(t, Klass::access_flags_offset()));
+++        __ test_bit(t0, t, exact_log2(JVM_ACC_HAS_FINALIZER));
+++        __ bnez(t0, register_finalizer);
+++        __ ret();
+++
+++        __ bind(register_finalizer);
+++        __ enter();
+++        OopMap* oop_map = save_live_registers(sasm);
+++        assert_cond(oop_map != NULL);
+++        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), x10);
+++        oop_maps = new OopMapSet();
+++        assert_cond(oop_maps != NULL);
+++        oop_maps->add_gc_map(call_offset, oop_map);
+++
+++        // Now restore all the live registers
+++        restore_live_registers(sasm);
+++
+++        __ leave();
+++        __ ret();
+++      }
+++      break;
+++
+++    case throw_class_cast_exception_id:
+++      {
+++        StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
+++        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
+++      }
+++      break;
+++
+++    case throw_incompatible_class_change_error_id:
+++      {
+++        StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return);
+++        oop_maps = generate_exception_throw(sasm,
+++                                            CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
+++      }
+++      break;
+++
+++    case slow_subtype_check_id:
+++      {
+++        // Typical calling sequence:
+++        // push klass_RInfo (object klass or other subclass)
+++        // push sup_k_RInfo (array element klass or other superclass)
+++        // jump to slow_subtype_check
+++        // Note that the subclass is pushed first, and is therefore deepest.
+++        enum layout {
+++          x10_off, x10_off_hi,
+++          x12_off, x12_off_hi,
+++          x14_off, x14_off_hi,
+++          x15_off, x15_off_hi,
+++          sup_k_off, sup_k_off_hi,
+++          klass_off, klass_off_hi,
+++          framesize,
+++          result_off = sup_k_off
+++        };
+++
+++        __ set_info("slow_subtype_check", dont_gc_arguments);
+++        __ push_reg(RegSet::of(x10, x12, x14, x15), sp);
+++
+++        __ ld(x14, Address(sp, (klass_off) * VMRegImpl::stack_slot_size)); // sub klass
+++        __ ld(x10, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // super klass
+++
+++        Label miss;
+++        __ check_klass_subtype_slow_path(x14, x10, x12, x15, NULL, &miss);
+++
+++        // fallthrough on success:
+++        __ mv(t0, 1);
+++        __ sd(t0, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
+++        __ pop_reg(RegSet::of(x10, x12, x14, x15), sp);
+++        __ ret();
+++
+++        __ bind(miss);
+++        __ sd(zr, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
+++        __ pop_reg(RegSet::of(x10, x12, x14, x15), sp);
+++        __ ret();
+++      }
+++      break;
+++
+++    case monitorenter_nofpu_id:
+++      save_fpu_registers = false;
+++      // fall through
+++    case monitorenter_id:
+++      {
+++        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
+++        OopMap* map = save_live_registers(sasm, save_fpu_registers);
+++        assert_cond(map != NULL);
+++
+++        // Called with store_parameter and not the C ABI
+++        f.load_argument(1, x10); // x10: object
+++        f.load_argument(0, x11); // x11: lock address
+++
+++        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), x10, x11);
+++
+++        oop_maps = new OopMapSet();
+++        assert_cond(oop_maps != NULL);
+++        oop_maps->add_gc_map(call_offset, map);
+++        restore_live_registers(sasm, save_fpu_registers);
+++      }
+++      break;
+++
+++    case monitorexit_nofpu_id:
+++      save_fpu_registers = false;
+++      // fall through
+++    case monitorexit_id:
+++      {
+++        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
+++        OopMap* map = save_live_registers(sasm, save_fpu_registers);
+++        assert_cond(map != NULL);
+++
+++        // Called with store_parameter and not the C ABI
+++        f.load_argument(0, x10); // x10: lock address
+++
+++        // note: really a leaf routine but must set up last java sp
+++        //       => use call_RT for now (speed can be improved by
+++        //       doing last java sp setup manually)
+++        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), x10);
+++
+++        oop_maps = new OopMapSet();
+++        assert_cond(oop_maps != NULL);
+++        oop_maps->add_gc_map(call_offset, map);
+++        restore_live_registers(sasm, save_fpu_registers);
+++      }
+++      break;
+++
+++    case deoptimize_id:
+++      {
+++        StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return);
+++        OopMap* oop_map = save_live_registers(sasm);
+++        assert_cond(oop_map != NULL);
+++        f.load_argument(0, c_rarg1);
+++        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), c_rarg1);
+++
+++        oop_maps = new OopMapSet();
+++        assert_cond(oop_maps != NULL);
+++        oop_maps->add_gc_map(call_offset, oop_map);
+++        restore_live_registers(sasm);
+++        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
+++        assert(deopt_blob != NULL, "deoptimization blob must have been created");
+++        __ leave();
+++        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
+++      }
+++      break;
+++
+++    case throw_range_check_failed_id:
+++      {
+++        StubFrame f(sasm, "range_check_failed", dont_gc_arguments, does_not_return);
+++        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
+++      }
+++      break;
+++
+++    case unwind_exception_id:
+++      {
+++        __ set_info("unwind_exception", dont_gc_arguments);
+++        // note: no stubframe since we are about to leave the current
+++        //       activation and we are calling a leaf VM function only.
+++        generate_unwind_exception(sasm);
+++      }
+++      break;
+++
+++    case access_field_patching_id:
+++      {
+++        StubFrame f(sasm, "access_field_patching", dont_gc_arguments, does_not_return);
+++        // we should set up register map
+++        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
+++      }
+++      break;
+++
+++    case load_klass_patching_id:
+++      {
+++        StubFrame f(sasm, "load_klass_patching", dont_gc_arguments, does_not_return);
+++        // we should set up register map
+++        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
+++      }
+++      break;
+++
+++    case load_mirror_patching_id:
+++      {
+++        StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments, does_not_return);
+++        // we should set up register map
+++        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
+++      }
+++      break;
+++
+++    case load_appendix_patching_id:
+++      {
+++        StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments, does_not_return);
+++        // we should set up register map
+++        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+++      }
+++      break;
+++
+++    case handle_exception_nofpu_id:
+++    case handle_exception_id:
+++      {
+++        StubFrame f(sasm, "handle_exception", dont_gc_arguments);
+++        oop_maps = generate_handle_exception(id, sasm);
+++      }
+++      break;
+++
+++    case handle_exception_from_callee_id:
+++      {
+++        StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
+++        oop_maps = generate_handle_exception(id, sasm);
+++      }
+++      break;
+++
+++    case throw_index_exception_id:
+++      {
+++        StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments, does_not_return);
+++        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
+++      }
+++      break;
+++
+++    case throw_array_store_exception_id:
+++      {
+++        StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments, does_not_return);
+++        // tos + 0: link
+++        //     + 1: return address
+++        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
+++      }
+++      break;
+++
+++    case predicate_failed_trap_id:
+++      {
+++        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return);
+++
+++        OopMap* map = save_live_registers(sasm);
+++        assert_cond(map != NULL);
+++
+++        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
+++        oop_maps = new OopMapSet();
+++        assert_cond(oop_maps != NULL);
+++        oop_maps->add_gc_map(call_offset, map);
+++        restore_live_registers(sasm);
+++        __ leave();
+++        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
+++        assert(deopt_blob != NULL, "deoptimization blob must have been created");
+++
+++        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
+++      }
+++      break;
+++
+++    case dtrace_object_alloc_id:
+++      { // c_rarg0: object
+++        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
+++        save_live_registers(sasm);
+++
+++        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), c_rarg0);
+++
+++        restore_live_registers(sasm);
+++      }
+++      break;
+++
+++    default:
+++      {
+++        StubFrame f(sasm, "unimplemented entry", dont_gc_arguments, does_not_return);
+++        __ mv(x10, (int)id);
+++        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), x10);
+++        __ should_not_reach_here();
+++      }
+++      break;
+++    }
+++  }
+++  return oop_maps;
+++}
+++
+++#undef __
+++
+++const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/c1_globals_riscv.hpp
++@@ -0,0 +1,65 @@
+++/*
+++ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef CPU_RISCV_C1_GLOBALS_RISCV_HPP
+++#define CPU_RISCV_C1_GLOBALS_RISCV_HPP
+++
+++#include "utilities/globalDefinitions.hpp"
+++#include "utilities/macros.hpp"
+++
+++// Sets the default values for platform dependent flags used by the client compiler.
+++// (see c1_globals.hpp)
+++
+++#ifndef COMPILER2
+++define_pd_global(bool,     BackgroundCompilation,        true );
+++define_pd_global(bool,     InlineIntrinsics,             true );
+++define_pd_global(bool,     PreferInterpreterNativeStubs, false);
+++define_pd_global(bool,     ProfileTraps,                 false);
+++define_pd_global(bool,     UseOnStackReplacement,        true );
+++define_pd_global(bool,     TieredCompilation,            false);
+++define_pd_global(intx,     CompileThreshold,             1500 );
+++
+++define_pd_global(intx,     OnStackReplacePercentage,     933  );
+++define_pd_global(intx,     NewSizeThreadIncrease,        4*K  );
+++define_pd_global(intx,     InitialCodeCacheSize,         160*K);
+++define_pd_global(intx,     ReservedCodeCacheSize,        32*M );
+++define_pd_global(intx,     NonProfiledCodeHeapSize,      13*M );
+++define_pd_global(intx,     ProfiledCodeHeapSize,         14*M );
+++define_pd_global(intx,     NonNMethodCodeHeapSize,       5*M  );
+++define_pd_global(bool,     ProfileInterpreter,           false);
+++define_pd_global(intx,     CodeCacheExpansionSize,       32*K );
+++define_pd_global(uintx,    CodeCacheMinBlockLength,      1);
+++define_pd_global(uintx,    CodeCacheMinimumUseSpace,     400*K);
+++define_pd_global(bool,     NeverActAsServerClassMachine, true );
+++define_pd_global(uint64_t, MaxRAM,                       1ULL*G);
+++define_pd_global(bool,     CICompileOSR,                 true );
+++#endif // !COMPILER2
+++define_pd_global(bool, UseTypeProfile, false);
+++
+++define_pd_global(bool, OptimizeSinglePrecision, true );
+++define_pd_global(bool, CSEArrayLength,          false);
+++define_pd_global(bool, TwoOperandLIRForm,       false);
+++
+++#endif // CPU_RISCV_C1_GLOBALS_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
++@@ -0,0 +1,1644 @@
+++/*
+++ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/assembler.hpp" +++#include "asm/assembler.inline.hpp" +++#include "opto/c2_MacroAssembler.hpp" +++#include "opto/intrinsicnode.hpp" +++#include "opto/subnode.hpp" +++#include "runtime/stubRoutines.hpp" +++ +++#ifdef PRODUCT +++#define BLOCK_COMMENT(str) /* nothing */ +++#define STOP(error) stop(error) +++#else +++#define BLOCK_COMMENT(str) block_comment(str) +++#define STOP(error) block_comment(error); stop(error) +++#endif +++ +++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") +++ +++// short string +++// StringUTF16.indexOfChar +++// StringLatin1.indexOfChar +++void C2_MacroAssembler::string_indexof_char_short(Register str1, Register cnt1, +++ Register ch, Register result, +++ bool isL) +++{ +++ Register ch1 = t0; +++ Register index = t1; +++ +++ BLOCK_COMMENT("string_indexof_char_short {"); +++ +++ Label LOOP, LOOP1, LOOP4, LOOP8; +++ Label MATCH, MATCH1, MATCH2, MATCH3, +++ MATCH4, MATCH5, MATCH6, MATCH7, NOMATCH; +++ +++ mv(result, -1); +++ mv(index, zr); +++ +++ bind(LOOP); +++ addi(t0, index, 8); +++ ble(t0, cnt1, LOOP8); +++ addi(t0, index, 4); +++ ble(t0, cnt1, LOOP4); +++ j(LOOP1); +++ +++ bind(LOOP8); +++ isL ? lbu(ch1, Address(str1, 0)) : lhu(ch1, Address(str1, 0)); +++ beq(ch, ch1, MATCH); +++ isL ? lbu(ch1, Address(str1, 1)) : lhu(ch1, Address(str1, 2)); +++ beq(ch, ch1, MATCH1); +++ isL ? lbu(ch1, Address(str1, 2)) : lhu(ch1, Address(str1, 4)); +++ beq(ch, ch1, MATCH2); +++ isL ? lbu(ch1, Address(str1, 3)) : lhu(ch1, Address(str1, 6)); +++ beq(ch, ch1, MATCH3); +++ isL ? lbu(ch1, Address(str1, 4)) : lhu(ch1, Address(str1, 8)); +++ beq(ch, ch1, MATCH4); +++ isL ? lbu(ch1, Address(str1, 5)) : lhu(ch1, Address(str1, 10)); +++ beq(ch, ch1, MATCH5); +++ isL ? lbu(ch1, Address(str1, 6)) : lhu(ch1, Address(str1, 12)); +++ beq(ch, ch1, MATCH6); +++ isL ? lbu(ch1, Address(str1, 7)) : lhu(ch1, Address(str1, 14)); +++ beq(ch, ch1, MATCH7); +++ addi(index, index, 8); +++ addi(str1, str1, isL ? 8 : 16); +++ blt(index, cnt1, LOOP); +++ j(NOMATCH); +++ +++ bind(LOOP4); +++ isL ? lbu(ch1, Address(str1, 0)) : lhu(ch1, Address(str1, 0)); +++ beq(ch, ch1, MATCH); +++ isL ? lbu(ch1, Address(str1, 1)) : lhu(ch1, Address(str1, 2)); +++ beq(ch, ch1, MATCH1); +++ isL ? lbu(ch1, Address(str1, 2)) : lhu(ch1, Address(str1, 4)); +++ beq(ch, ch1, MATCH2); +++ isL ? lbu(ch1, Address(str1, 3)) : lhu(ch1, Address(str1, 6)); +++ beq(ch, ch1, MATCH3); +++ addi(index, index, 4); +++ addi(str1, str1, isL ? 4 : 8); +++ bge(index, cnt1, NOMATCH); +++ +++ bind(LOOP1); +++ isL ? lbu(ch1, Address(str1)) : lhu(ch1, Address(str1)); +++ beq(ch, ch1, MATCH); +++ addi(index, index, 1); +++ addi(str1, str1, isL ? 
1 : 2); +++ blt(index, cnt1, LOOP1); +++ j(NOMATCH); +++ +++ bind(MATCH1); +++ addi(index, index, 1); +++ j(MATCH); +++ +++ bind(MATCH2); +++ addi(index, index, 2); +++ j(MATCH); +++ +++ bind(MATCH3); +++ addi(index, index, 3); +++ j(MATCH); +++ +++ bind(MATCH4); +++ addi(index, index, 4); +++ j(MATCH); +++ +++ bind(MATCH5); +++ addi(index, index, 5); +++ j(MATCH); +++ +++ bind(MATCH6); +++ addi(index, index, 6); +++ j(MATCH); +++ +++ bind(MATCH7); +++ addi(index, index, 7); +++ +++ bind(MATCH); +++ mv(result, index); +++ bind(NOMATCH); +++ BLOCK_COMMENT("} string_indexof_char_short"); +++} +++ +++// StringUTF16.indexOfChar +++// StringLatin1.indexOfChar +++void C2_MacroAssembler::string_indexof_char(Register str1, Register cnt1, +++ Register ch, Register result, +++ Register tmp1, Register tmp2, +++ Register tmp3, Register tmp4, +++ bool isL) +++{ +++ Label CH1_LOOP, HIT, NOMATCH, DONE, DO_LONG; +++ Register ch1 = t0; +++ Register orig_cnt = t1; +++ Register mask1 = tmp3; +++ Register mask2 = tmp2; +++ Register match_mask = tmp1; +++ Register trailing_char = tmp4; +++ Register unaligned_elems = tmp4; +++ +++ BLOCK_COMMENT("string_indexof_char {"); +++ beqz(cnt1, NOMATCH); +++ +++ addi(t0, cnt1, isL ? -32 : -16); +++ bgtz(t0, DO_LONG); +++ string_indexof_char_short(str1, cnt1, ch, result, isL); +++ j(DONE); +++ +++ bind(DO_LONG); +++ mv(orig_cnt, cnt1); +++ if (AvoidUnalignedAccesses) { +++ Label ALIGNED; +++ andi(unaligned_elems, str1, 0x7); +++ beqz(unaligned_elems, ALIGNED); +++ sub(unaligned_elems, unaligned_elems, 8); +++ neg(unaligned_elems, unaligned_elems); +++ if (!isL) { +++ srli(unaligned_elems, unaligned_elems, 1); +++ } +++ // do unaligned part per element +++ string_indexof_char_short(str1, unaligned_elems, ch, result, isL); +++ bgez(result, DONE); +++ mv(orig_cnt, cnt1); +++ sub(cnt1, cnt1, unaligned_elems); +++ bind(ALIGNED); +++ } +++ +++ // duplicate ch +++ if (isL) { +++ slli(ch1, ch, 8); +++ orr(ch, ch1, ch); +++ } +++ slli(ch1, ch, 16); +++ orr(ch, ch1, ch); +++ slli(ch1, ch, 32); +++ orr(ch, ch1, ch); +++ +++ if (!isL) { +++ slli(cnt1, cnt1, 1); +++ } +++ +++ uint64_t mask0101 = UCONST64(0x0101010101010101); +++ uint64_t mask0001 = UCONST64(0x0001000100010001); +++ mv(mask1, isL ? mask0101 : mask0001); +++ uint64_t mask7f7f = UCONST64(0x7f7f7f7f7f7f7f7f); +++ uint64_t mask7fff = UCONST64(0x7fff7fff7fff7fff); +++ mv(mask2, isL ? 
mask7f7f : mask7fff);
+++
+++ bind(CH1_LOOP);
+++ ld(ch1, Address(str1));
+++ addi(str1, str1, 8);
+++ addi(cnt1, cnt1, -8);
+++ compute_match_mask(ch1, ch, match_mask, mask1, mask2);
+++ bnez(match_mask, HIT);
+++ bgtz(cnt1, CH1_LOOP);
+++ j(NOMATCH);
+++
+++ bind(HIT);
+++ ctzc_bit(trailing_char, match_mask, isL, ch1, result);
+++ srli(trailing_char, trailing_char, 3);
+++ addi(cnt1, cnt1, 8);
+++ ble(cnt1, trailing_char, NOMATCH);
+++ // match case
+++ if (!isL) {
+++ srli(cnt1, cnt1, 1);
+++ srli(trailing_char, trailing_char, 1);
+++ }
+++
+++ sub(result, orig_cnt, cnt1);
+++ add(result, result, trailing_char);
+++ j(DONE);
+++
+++ bind(NOMATCH);
+++ mv(result, -1);
+++
+++ bind(DONE);
+++ BLOCK_COMMENT("} string_indexof_char");
+++}
+++
+++typedef void (MacroAssembler::* load_chr_insn)(Register rd, const Address &adr, Register temp);
+++
+++// Search for needle in haystack and return index or -1
+++// x10: result
+++// x11: haystack
+++// x12: haystack_len
+++// x13: needle
+++// x14: needle_len
+++void C2_MacroAssembler::string_indexof(Register haystack, Register needle,
+++ Register haystack_len, Register needle_len,
+++ Register tmp1, Register tmp2,
+++ Register tmp3, Register tmp4,
+++ Register tmp5, Register tmp6,
+++ Register result, int ae)
+++{
+++ assert(ae != StrIntrinsicNode::LU, "Invalid encoding");
+++
+++ Label LINEARSEARCH, LINEARSTUB, DONE, NOMATCH;
+++
+++ Register ch1 = t0;
+++ Register ch2 = t1;
+++ Register nlen_tmp = tmp1; // needle len tmp
+++ Register hlen_tmp = tmp2; // haystack len tmp
+++ Register result_tmp = tmp4;
+++
+++ bool isLL = ae == StrIntrinsicNode::LL;
+++
+++ bool needle_isL = ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UL;
+++ bool haystack_isL = ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::LU;
+++ int needle_chr_shift = needle_isL ? 0 : 1;
+++ int haystack_chr_shift = haystack_isL ? 0 : 1;
+++ int needle_chr_size = needle_isL ? 1 : 2;
+++ int haystack_chr_size = haystack_isL ? 1 : 2;
+++ load_chr_insn needle_load_1chr = needle_isL ? (load_chr_insn)&MacroAssembler::lbu :
+++ (load_chr_insn)&MacroAssembler::lhu;
+++ load_chr_insn haystack_load_1chr = haystack_isL ? (load_chr_insn)&MacroAssembler::lbu :
+++ (load_chr_insn)&MacroAssembler::lhu;
+++
+++ BLOCK_COMMENT("string_indexof {");
+++
+++ // Note, inline_string_indexOf() generates checks:
+++ // if (pattern.count > src.count) return -1;
+++ // if (pattern.count == 0) return 0;
+++
+++ // We have two strings, a source string in haystack, haystack_len and a pattern string
+++ // in needle, needle_len. Find the first occurrence of pattern in source or return -1.
+++
+++ // For larger pattern and source we use a simplified Boyer-Moore algorithm.
+++ // With a small pattern and source we use linear scan.
+++
+++ // needle_len >= 8 && needle_len < 256 && needle_len < haystack_len/4, use bmh algorithm.
+++ sub(result_tmp, haystack_len, needle_len);
+++ // needle_len < 8, use linear scan
+++ sub(t0, needle_len, 8);
+++ bltz(t0, LINEARSEARCH);
+++ // needle_len >= 256, use linear scan
+++ sub(t0, needle_len, 256);
+++ bgez(t0, LINEARSTUB);
+++ // needle_len >= haystack_len/4, use linear scan
+++ srli(t0, haystack_len, 2);
+++ bge(needle_len, t0, LINEARSTUB);
+++
+++ // Boyer-Moore-Horspool introduction:
+++ // The Boyer-Moore algorithm is based on the description here:
+++ //
+++ // http://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string_search_algorithm
+++ //
+++ // This describes an algorithm with 2 shift rules. The 'Bad Character' rule
+++ // and the 'Good Suffix' rule.
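+++ // (For example, with an illustrative pattern "needle": if the haystack
+++ // character aligned with the final 'e' is 'z', which never occurs in the
+++ // pattern, the bad-character rule allows shifting the pattern forward by
+++ // its full length in a single step.)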
+++ //
+++ // These rules are essentially heuristics for how far we can shift the
+++ // pattern along the search string.
+++ //
+++ // The implementation here uses the 'Bad Character' rule only because of the
+++ // complexity of initialisation for the 'Good Suffix' rule.
+++ //
+++ // This is also known as the Boyer-Moore-Horspool algorithm:
+++ //
+++ // http://en.wikipedia.org/wiki/Boyer-Moore-Horspool_algorithm
+++ //
+++ // #define ASIZE 256
+++ //
+++ // int bm(unsigned char *pattern, int m, unsigned char *src, int n) {
+++ // int i, j;
+++ // unsigned c;
+++ // unsigned char bc[ASIZE];
+++ //
+++ // /* Preprocessing */
+++ // for (i = 0; i < ASIZE; ++i)
+++ // bc[i] = m;
+++ // for (i = 0; i < m - 1; ) {
+++ // c = pattern[i];
+++ // ++i;
+++ // // c < 256 for Latin1 string, so, no need for branch
+++ // #ifdef PATTERN_STRING_IS_LATIN1
+++ // bc[c] = m - i;
+++ // #else
+++ // if (c < ASIZE) bc[c] = m - i;
+++ // #endif
+++ // }
+++ //
+++ // /* Searching */
+++ // j = 0;
+++ // while (j <= n - m) {
+++ // c = src[i+j];
+++ // if (pattern[m-1] == c)
+++ // int k;
+++ // for (k = m - 2; k >= 0 && pattern[k] == src[k + j]; --k);
+++ // if (k < 0) return j;
+++ // // c < 256 for Latin1 string, so, no need for branch
+++ // #ifdef SOURCE_STRING_IS_LATIN1_AND_PATTERN_STRING_IS_LATIN1
+++ // // LL case: (c < 256) always true. Remove branch
+++ // j += bc[pattern[j+m-1]];
+++ // #endif
+++ // #ifdef SOURCE_STRING_IS_UTF_AND_PATTERN_STRING_IS_UTF
+++ // // UU case: need if (c < ASIZE) check. Skip 1 character if not.
+++ // if (c < ASIZE)
+++ // j += bc[pattern[j+m-1]];
+++ // else
+++ // j += m
+++ // #endif
+++ // }
+++ // return -1;
+++ // }
+++
+++ // temp registers: t0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, result
+++ Label BCLOOP, BCSKIP, BMLOOPSTR2, BMLOOPSTR1, BMSKIP, BMADV, BMMATCH,
+++ BMLOOPSTR1_LASTCMP, BMLOOPSTR1_CMP, BMLOOPSTR1_AFTER_LOAD, BM_INIT_LOOP;
+++
+++ Register haystack_end = haystack_len;
+++ Register skipch = tmp2;
+++
+++ // pattern length is >= 8, so, we can read at least 1 register for cases when
+++ // UTF->Latin1 conversion is not needed (8 LL or 4 UU) and half register for
+++ // UL case. We'll re-read the last character in inner pre-loop code to have
+++ // single outer pre-loop load
+++ const int firstStep = isLL ?
7 : 3; +++ +++ const int ASIZE = 256; +++ const int STORE_BYTES = 8; // 8 bytes stored per instruction(sd) +++ +++ sub(sp, sp, ASIZE); +++ +++ // init BC offset table with default value: needle_len +++ slli(t0, needle_len, 8); +++ orr(t0, t0, needle_len); // [63...16][needle_len][needle_len] +++ slli(tmp1, t0, 16); +++ orr(t0, tmp1, t0); // [63...32][needle_len][needle_len][needle_len][needle_len] +++ slli(tmp1, t0, 32); +++ orr(tmp5, tmp1, t0); // tmp5: 8 elements [needle_len] +++ +++ mv(ch1, sp); // ch1 is t0 +++ mv(tmp6, ASIZE / STORE_BYTES); // loop iterations +++ +++ bind(BM_INIT_LOOP); +++ // for (i = 0; i < ASIZE; ++i) +++ // bc[i] = m; +++ for (int i = 0; i < 4; i++) { +++ sd(tmp5, Address(ch1, i * wordSize)); +++ } +++ add(ch1, ch1, 32); +++ sub(tmp6, tmp6, 4); +++ bgtz(tmp6, BM_INIT_LOOP); +++ +++ sub(nlen_tmp, needle_len, 1); // m - 1, index of the last element in pattern +++ Register orig_haystack = tmp5; +++ mv(orig_haystack, haystack); +++ // result_tmp = tmp4 +++ shadd(haystack_end, result_tmp, haystack, haystack_end, haystack_chr_shift); +++ sub(ch2, needle_len, 1); // bc offset init value, ch2 is t1 +++ mv(tmp3, needle); +++ +++ // for (i = 0; i < m - 1; ) { +++ // c = pattern[i]; +++ // ++i; +++ // // c < 256 for Latin1 string, so, no need for branch +++ // #ifdef PATTERN_STRING_IS_LATIN1 +++ // bc[c] = m - i; +++ // #else +++ // if (c < ASIZE) bc[c] = m - i; +++ // #endif +++ // } +++ bind(BCLOOP); +++ (this->*needle_load_1chr)(ch1, Address(tmp3), noreg); +++ add(tmp3, tmp3, needle_chr_size); +++ if (!needle_isL) { +++ // ae == StrIntrinsicNode::UU +++ mv(tmp6, ASIZE); +++ bgeu(ch1, tmp6, BCSKIP); +++ } +++ add(tmp4, sp, ch1); +++ sb(ch2, Address(tmp4)); // store skip offset to BC offset table +++ +++ bind(BCSKIP); +++ sub(ch2, ch2, 1); // for next pattern element, skip distance -1 +++ bgtz(ch2, BCLOOP); +++ +++ // tmp6: pattern end, address after needle +++ shadd(tmp6, needle_len, needle, tmp6, needle_chr_shift); +++ if (needle_isL == haystack_isL) { +++ // load last 8 bytes (8LL/4UU symbols) +++ ld(tmp6, Address(tmp6, -wordSize)); +++ } else { +++ // UL: from UTF-16(source) search Latin1(pattern) +++ lwu(tmp6, Address(tmp6, -wordSize / 2)); // load last 4 bytes(4 symbols) +++ // convert Latin1 to UTF. eg: 0x0000abcd -> 0x0a0b0c0d +++ // We'll have to wait until load completed, but it's still faster than per-character loads+checks +++ srli(tmp3, tmp6, BitsPerByte * (wordSize / 2 - needle_chr_size)); // pattern[m-1], eg:0x0000000a +++ slli(ch2, tmp6, XLEN - 24); +++ srli(ch2, ch2, XLEN - 8); // pattern[m-2], 0x0000000b +++ slli(ch1, tmp6, XLEN - 16); +++ srli(ch1, ch1, XLEN - 8); // pattern[m-3], 0x0000000c +++ andi(tmp6, tmp6, 0xff); // pattern[m-4], 0x0000000d +++ slli(ch2, ch2, 16); +++ orr(ch2, ch2, ch1); // 0x00000b0c +++ slli(result, tmp3, 48); // use result as temp register +++ orr(tmp6, tmp6, result); // 0x0a00000d +++ slli(result, ch2, 16); +++ orr(tmp6, tmp6, result); // UTF-16:0x0a0b0c0d +++ } +++ +++ // i = m - 1; +++ // skipch = j + i; +++ // if (skipch == pattern[m - 1] +++ // for (k = m - 2; k >= 0 && pattern[k] == src[k + j]; --k); +++ // else +++ // move j with bad char offset table +++ bind(BMLOOPSTR2); +++ // compare pattern to source string backward +++ shadd(result, nlen_tmp, haystack, result, haystack_chr_shift); +++ (this->*haystack_load_1chr)(skipch, Address(result), noreg); +++ sub(nlen_tmp, nlen_tmp, firstStep); // nlen_tmp is positive here, because needle_len >= 8 +++ if (needle_isL == haystack_isL) { +++ // re-init tmp3. 
It's for free because it's executed in parallel with +++ // load above. Alternative is to initialize it before loop, but it'll +++ // affect performance on in-order systems with 2 or more ld/st pipelines +++ srli(tmp3, tmp6, BitsPerByte * (wordSize - needle_chr_size)); // UU/LL: pattern[m-1] +++ } +++ if (!isLL) { // UU/UL case +++ slli(ch2, nlen_tmp, 1); // offsets in bytes +++ } +++ bne(tmp3, skipch, BMSKIP); // if not equal, skipch is bad char +++ add(result, haystack, isLL ? nlen_tmp : ch2); +++ ld(ch2, Address(result)); // load 8 bytes from source string +++ mv(ch1, tmp6); +++ if (isLL) { +++ j(BMLOOPSTR1_AFTER_LOAD); +++ } else { +++ sub(nlen_tmp, nlen_tmp, 1); // no need to branch for UU/UL case. cnt1 >= 8 +++ j(BMLOOPSTR1_CMP); +++ } +++ +++ bind(BMLOOPSTR1); +++ shadd(ch1, nlen_tmp, needle, ch1, needle_chr_shift); +++ (this->*needle_load_1chr)(ch1, Address(ch1), noreg); +++ shadd(ch2, nlen_tmp, haystack, ch2, haystack_chr_shift); +++ (this->*haystack_load_1chr)(ch2, Address(ch2), noreg); +++ +++ bind(BMLOOPSTR1_AFTER_LOAD); +++ sub(nlen_tmp, nlen_tmp, 1); +++ bltz(nlen_tmp, BMLOOPSTR1_LASTCMP); +++ +++ bind(BMLOOPSTR1_CMP); +++ beq(ch1, ch2, BMLOOPSTR1); +++ +++ bind(BMSKIP); +++ if (!isLL) { +++ // if we've met UTF symbol while searching Latin1 pattern, then we can +++ // skip needle_len symbols +++ if (needle_isL != haystack_isL) { +++ mv(result_tmp, needle_len); +++ } else { +++ mv(result_tmp, 1); +++ } +++ mv(t0, ASIZE); +++ bgeu(skipch, t0, BMADV); +++ } +++ add(result_tmp, sp, skipch); +++ lbu(result_tmp, Address(result_tmp)); // load skip offset +++ +++ bind(BMADV); +++ sub(nlen_tmp, needle_len, 1); +++ // move haystack after bad char skip offset +++ shadd(haystack, result_tmp, haystack, result, haystack_chr_shift); +++ ble(haystack, haystack_end, BMLOOPSTR2); +++ add(sp, sp, ASIZE); +++ j(NOMATCH); +++ +++ bind(BMLOOPSTR1_LASTCMP); +++ bne(ch1, ch2, BMSKIP); +++ +++ bind(BMMATCH); +++ sub(result, haystack, orig_haystack); +++ if (!haystack_isL) { +++ srli(result, result, 1); +++ } +++ add(sp, sp, ASIZE); +++ j(DONE); +++ +++ bind(LINEARSTUB); +++ sub(t0, needle_len, 16); // small patterns still should be handled by simple algorithm +++ bltz(t0, LINEARSEARCH); +++ mv(result, zr); +++ RuntimeAddress stub = NULL; +++ if (isLL) { +++ stub = RuntimeAddress(StubRoutines::riscv::string_indexof_linear_ll()); +++ assert(stub.target() != NULL, "string_indexof_linear_ll stub has not been generated"); +++ } else if (needle_isL) { +++ stub = RuntimeAddress(StubRoutines::riscv::string_indexof_linear_ul()); +++ assert(stub.target() != NULL, "string_indexof_linear_ul stub has not been generated"); +++ } else { +++ stub = RuntimeAddress(StubRoutines::riscv::string_indexof_linear_uu()); +++ assert(stub.target() != NULL, "string_indexof_linear_uu stub has not been generated"); +++ } +++ trampoline_call(stub); +++ j(DONE); +++ +++ bind(NOMATCH); +++ mv(result, -1); +++ j(DONE); +++ +++ bind(LINEARSEARCH); +++ string_indexof_linearscan(haystack, needle, haystack_len, needle_len, tmp1, tmp2, tmp3, tmp4, -1, result, ae); +++ +++ bind(DONE); +++ BLOCK_COMMENT("} string_indexof"); +++} +++ +++// string_indexof +++// result: x10 +++// src: x11 +++// src_count: x12 +++// pattern: x13 +++// pattern_count: x14 or 1/2/3/4 +++void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register needle, +++ Register haystack_len, Register needle_len, +++ Register tmp1, Register tmp2, +++ Register tmp3, Register tmp4, +++ int needle_con_cnt, Register result, int ae) +++{ +++ // Note: +++ // 
needle_con_cnt > 0 means needle_len register is invalid, needle length is constant +++ // for UU/LL: needle_con_cnt[1, 4], UL: needle_con_cnt = 1 +++ assert(needle_con_cnt <= 4, "Invalid needle constant count"); +++ assert(ae != StrIntrinsicNode::LU, "Invalid encoding"); +++ +++ Register ch1 = t0; +++ Register ch2 = t1; +++ Register hlen_neg = haystack_len, nlen_neg = needle_len; +++ Register nlen_tmp = tmp1, hlen_tmp = tmp2, result_tmp = tmp4; +++ +++ bool isLL = ae == StrIntrinsicNode::LL; +++ +++ bool needle_isL = ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UL; +++ bool haystack_isL = ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::LU; +++ int needle_chr_shift = needle_isL ? 0 : 1; +++ int haystack_chr_shift = haystack_isL ? 0 : 1; +++ int needle_chr_size = needle_isL ? 1 : 2; +++ int haystack_chr_size = haystack_isL ? 1 : 2; +++ +++ load_chr_insn needle_load_1chr = needle_isL ? (load_chr_insn)&MacroAssembler::lbu : +++ (load_chr_insn)&MacroAssembler::lhu; +++ load_chr_insn haystack_load_1chr = haystack_isL ? (load_chr_insn)&MacroAssembler::lbu : +++ (load_chr_insn)&MacroAssembler::lhu; +++ load_chr_insn load_2chr = isLL ? (load_chr_insn)&MacroAssembler::lhu : (load_chr_insn)&MacroAssembler::lwu; +++ load_chr_insn load_4chr = isLL ? (load_chr_insn)&MacroAssembler::lwu : (load_chr_insn)&MacroAssembler::ld; +++ +++ Label DO1, DO2, DO3, MATCH, NOMATCH, DONE; +++ +++ Register first = tmp3; +++ +++ if (needle_con_cnt == -1) { +++ Label DOSHORT, FIRST_LOOP, STR2_NEXT, STR1_LOOP, STR1_NEXT; +++ +++ sub(t0, needle_len, needle_isL == haystack_isL ? 4 : 2); +++ bltz(t0, DOSHORT); +++ +++ (this->*needle_load_1chr)(first, Address(needle), noreg); +++ slli(t0, needle_len, needle_chr_shift); +++ add(needle, needle, t0); +++ neg(nlen_neg, t0); +++ slli(t0, result_tmp, haystack_chr_shift); +++ add(haystack, haystack, t0); +++ neg(hlen_neg, t0); +++ +++ bind(FIRST_LOOP); +++ add(t0, haystack, hlen_neg); +++ (this->*haystack_load_1chr)(ch2, Address(t0), noreg); +++ beq(first, ch2, STR1_LOOP); +++ +++ bind(STR2_NEXT); +++ add(hlen_neg, hlen_neg, haystack_chr_size); +++ blez(hlen_neg, FIRST_LOOP); +++ j(NOMATCH); +++ +++ bind(STR1_LOOP); +++ add(nlen_tmp, nlen_neg, needle_chr_size); +++ add(hlen_tmp, hlen_neg, haystack_chr_size); +++ bgez(nlen_tmp, MATCH); +++ +++ bind(STR1_NEXT); +++ add(ch1, needle, nlen_tmp); +++ (this->*needle_load_1chr)(ch1, Address(ch1), noreg); +++ add(ch2, haystack, hlen_tmp); +++ (this->*haystack_load_1chr)(ch2, Address(ch2), noreg); +++ bne(ch1, ch2, STR2_NEXT); +++ add(nlen_tmp, nlen_tmp, needle_chr_size); +++ add(hlen_tmp, hlen_tmp, haystack_chr_size); +++ bltz(nlen_tmp, STR1_NEXT); +++ j(MATCH); +++ +++ bind(DOSHORT); +++ if (needle_isL == haystack_isL) { +++ sub(t0, needle_len, 2); +++ bltz(t0, DO1); +++ bgtz(t0, DO3); +++ } +++ } +++ +++ if (needle_con_cnt == 4) { +++ Label CH1_LOOP; +++ (this->*load_4chr)(ch1, Address(needle), noreg); +++ sub(result_tmp, haystack_len, 4); +++ slli(tmp3, result_tmp, haystack_chr_shift); // result as tmp +++ add(haystack, haystack, tmp3); +++ neg(hlen_neg, tmp3); +++ +++ bind(CH1_LOOP); +++ add(ch2, haystack, hlen_neg); +++ (this->*load_4chr)(ch2, Address(ch2), noreg); +++ beq(ch1, ch2, MATCH); +++ add(hlen_neg, hlen_neg, haystack_chr_size); +++ blez(hlen_neg, CH1_LOOP); +++ j(NOMATCH); +++ } +++ +++ if ((needle_con_cnt == -1 && needle_isL == haystack_isL) || needle_con_cnt == 2) { +++ Label CH1_LOOP; +++ BLOCK_COMMENT("string_indexof DO2 {"); +++ bind(DO2); +++ (this->*load_2chr)(ch1, Address(needle), noreg); +++ if 
(needle_con_cnt == 2) { +++ sub(result_tmp, haystack_len, 2); +++ } +++ slli(tmp3, result_tmp, haystack_chr_shift); +++ add(haystack, haystack, tmp3); +++ neg(hlen_neg, tmp3); +++ +++ bind(CH1_LOOP); +++ add(tmp3, haystack, hlen_neg); +++ (this->*load_2chr)(ch2, Address(tmp3), noreg); +++ beq(ch1, ch2, MATCH); +++ add(hlen_neg, hlen_neg, haystack_chr_size); +++ blez(hlen_neg, CH1_LOOP); +++ j(NOMATCH); +++ BLOCK_COMMENT("} string_indexof DO2"); +++ } +++ +++ if ((needle_con_cnt == -1 && needle_isL == haystack_isL) || needle_con_cnt == 3) { +++ Label FIRST_LOOP, STR2_NEXT, STR1_LOOP; +++ BLOCK_COMMENT("string_indexof DO3 {"); +++ +++ bind(DO3); +++ (this->*load_2chr)(first, Address(needle), noreg); +++ (this->*needle_load_1chr)(ch1, Address(needle, 2 * needle_chr_size), noreg); +++ if (needle_con_cnt == 3) { +++ sub(result_tmp, haystack_len, 3); +++ } +++ slli(hlen_tmp, result_tmp, haystack_chr_shift); +++ add(haystack, haystack, hlen_tmp); +++ neg(hlen_neg, hlen_tmp); +++ +++ bind(FIRST_LOOP); +++ add(ch2, haystack, hlen_neg); +++ (this->*load_2chr)(ch2, Address(ch2), noreg); +++ beq(first, ch2, STR1_LOOP); +++ +++ bind(STR2_NEXT); +++ add(hlen_neg, hlen_neg, haystack_chr_size); +++ blez(hlen_neg, FIRST_LOOP); +++ j(NOMATCH); +++ +++ bind(STR1_LOOP); +++ add(hlen_tmp, hlen_neg, 2 * haystack_chr_size); +++ add(ch2, haystack, hlen_tmp); +++ (this->*haystack_load_1chr)(ch2, Address(ch2), noreg); +++ bne(ch1, ch2, STR2_NEXT); +++ j(MATCH); +++ BLOCK_COMMENT("} string_indexof DO3"); +++ } +++ +++ if (needle_con_cnt == -1 || needle_con_cnt == 1) { +++ Label DO1_LOOP; +++ +++ BLOCK_COMMENT("string_indexof DO1 {"); +++ bind(DO1); +++ (this->*needle_load_1chr)(ch1, Address(needle), noreg); +++ sub(result_tmp, haystack_len, 1); +++ mv(tmp3, result_tmp); +++ if (haystack_chr_shift) { +++ slli(tmp3, result_tmp, haystack_chr_shift); +++ } +++ add(haystack, haystack, tmp3); +++ neg(hlen_neg, tmp3); +++ +++ bind(DO1_LOOP); +++ add(tmp3, haystack, hlen_neg); +++ (this->*haystack_load_1chr)(ch2, Address(tmp3), noreg); +++ beq(ch1, ch2, MATCH); +++ add(hlen_neg, hlen_neg, haystack_chr_size); +++ blez(hlen_neg, DO1_LOOP); +++ BLOCK_COMMENT("} string_indexof DO1"); +++ } +++ +++ bind(NOMATCH); +++ mv(result, -1); +++ j(DONE); +++ +++ bind(MATCH); +++ srai(t0, hlen_neg, haystack_chr_shift); +++ add(result, result_tmp, t0); +++ +++ bind(DONE); +++} +++ +++// Compare strings. +++void C2_MacroAssembler::string_compare(Register str1, Register str2, +++ Register cnt1, Register cnt2, Register result, Register tmp1, Register tmp2, +++ Register tmp3, int ae) +++{ +++ Label DONE, SHORT_LOOP, SHORT_STRING, SHORT_LAST, TAIL, STUB, +++ DIFFERENCE, NEXT_WORD, SHORT_LOOP_TAIL, SHORT_LAST2, SHORT_LAST_INIT, +++ SHORT_LOOP_START, TAIL_CHECK, L; +++ +++ const int STUB_THRESHOLD = 64 + 8; +++ bool isLL = ae == StrIntrinsicNode::LL; +++ bool isLU = ae == StrIntrinsicNode::LU; +++ bool isUL = ae == StrIntrinsicNode::UL; +++ +++ bool str1_isL = isLL || isLU; +++ bool str2_isL = isLL || isUL; +++ +++ // for L strings, 1 byte for 1 character +++ // for U strings, 2 bytes for 1 character +++ int str1_chr_size = str1_isL ? 1 : 2; +++ int str2_chr_size = str2_isL ? 1 : 2; +++ int minCharsInWord = isLL ? wordSize : wordSize / 2; +++ +++ load_chr_insn str1_load_chr = str1_isL ? (load_chr_insn)&MacroAssembler::lbu : (load_chr_insn)&MacroAssembler::lhu; +++ load_chr_insn str2_load_chr = str2_isL ? 
(load_chr_insn)&MacroAssembler::lbu : (load_chr_insn)&MacroAssembler::lhu;
+++
+++ BLOCK_COMMENT("string_compare {");
+++
+++ // Bizarrely, the counts are passed in bytes, regardless of whether they
+++ // are L or U strings; however, the result is always in characters.
+++ if (!str1_isL) {
+++ sraiw(cnt1, cnt1, 1);
+++ }
+++ if (!str2_isL) {
+++ sraiw(cnt2, cnt2, 1);
+++ }
+++
+++ // Compute the minimum of the string lengths and save the difference in result.
+++ sub(result, cnt1, cnt2);
+++ bgt(cnt1, cnt2, L);
+++ mv(cnt2, cnt1);
+++ bind(L);
+++
+++ // A very short string
+++ mv(t0, minCharsInWord);
+++ ble(cnt2, t0, SHORT_STRING);
+++
+++ // Compare longwords
+++ // load first parts of strings and finish initialization while loading
+++ {
+++ if (str1_isL == str2_isL) { // LL or UU
+++ // load 8 bytes once to compare
+++ ld(tmp1, Address(str1));
+++ beq(str1, str2, DONE);
+++ ld(tmp2, Address(str2));
+++ mv(t0, STUB_THRESHOLD);
+++ bge(cnt2, t0, STUB);
+++ sub(cnt2, cnt2, minCharsInWord);
+++ beqz(cnt2, TAIL_CHECK);
+++ // convert cnt2 from characters to bytes
+++ if (!str1_isL) {
+++ slli(cnt2, cnt2, 1);
+++ }
+++ add(str2, str2, cnt2);
+++ add(str1, str1, cnt2);
+++ sub(cnt2, zr, cnt2);
+++ } else if (isLU) { // LU case
+++ lwu(tmp1, Address(str1));
+++ ld(tmp2, Address(str2));
+++ mv(t0, STUB_THRESHOLD);
+++ bge(cnt2, t0, STUB);
+++ addi(cnt2, cnt2, -4);
+++ add(str1, str1, cnt2);
+++ sub(cnt1, zr, cnt2);
+++ slli(cnt2, cnt2, 1);
+++ add(str2, str2, cnt2);
+++ inflate_lo32(tmp3, tmp1);
+++ mv(tmp1, tmp3);
+++ sub(cnt2, zr, cnt2);
+++ addi(cnt1, cnt1, 4);
+++ } else { // UL case
+++ ld(tmp1, Address(str1));
+++ lwu(tmp2, Address(str2));
+++ mv(t0, STUB_THRESHOLD);
+++ bge(cnt2, t0, STUB);
+++ addi(cnt2, cnt2, -4);
+++ slli(t0, cnt2, 1);
+++ sub(cnt1, zr, t0);
+++ add(str1, str1, t0);
+++ add(str2, str2, cnt2);
+++ inflate_lo32(tmp3, tmp2);
+++ mv(tmp2, tmp3);
+++ sub(cnt2, zr, cnt2);
+++ addi(cnt1, cnt1, 8);
+++ }
+++ addi(cnt2, cnt2, isUL ? 4 : 8);
+++ bgez(cnt2, TAIL);
+++ xorr(tmp3, tmp1, tmp2);
+++ bnez(tmp3, DIFFERENCE);
+++
+++ // main loop
+++ bind(NEXT_WORD);
+++ if (str1_isL == str2_isL) { // LL or UU
+++ add(t0, str1, cnt2);
+++ ld(tmp1, Address(t0));
+++ add(t0, str2, cnt2);
+++ ld(tmp2, Address(t0));
+++ addi(cnt2, cnt2, 8);
+++ } else if (isLU) { // LU case
+++ add(t0, str1, cnt1);
+++ lwu(tmp1, Address(t0));
+++ add(t0, str2, cnt2);
+++ ld(tmp2, Address(t0));
+++ addi(cnt1, cnt1, 4);
+++ inflate_lo32(tmp3, tmp1);
+++ mv(tmp1, tmp3);
+++ addi(cnt2, cnt2, 8);
+++ } else { // UL case
+++ add(t0, str2, cnt2);
+++ lwu(tmp2, Address(t0));
+++ add(t0, str1, cnt1);
+++ ld(tmp1, Address(t0));
+++ inflate_lo32(tmp3, tmp2);
+++ mv(tmp2, tmp3);
+++ addi(cnt1, cnt1, 8);
+++ addi(cnt2, cnt2, 4);
+++ }
+++ bgez(cnt2, TAIL);
+++
+++ xorr(tmp3, tmp1, tmp2);
+++ beqz(tmp3, NEXT_WORD);
+++ j(DIFFERENCE);
+++ bind(TAIL);
+++ xorr(tmp3, tmp1, tmp2);
+++ bnez(tmp3, DIFFERENCE);
+++ // Last longword. In the case where length == 4 we compare the
+++ // same longword twice, but that's still faster than another
+++ // conditional branch.
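+++ // (E.g. for a UU compare of 6 characters (12 bytes), the first 8-byte load
+++ // covers characters 0-3 and this tail reload covers characters 2-5;
+++ // characters 2-3 are compared twice, which is redundant but harmless.)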
+++ if (str1_isL == str2_isL) { // LL or UU +++ ld(tmp1, Address(str1)); +++ ld(tmp2, Address(str2)); +++ } else if (isLU) { // LU case +++ lwu(tmp1, Address(str1)); +++ ld(tmp2, Address(str2)); +++ inflate_lo32(tmp3, tmp1); +++ mv(tmp1, tmp3); +++ } else { // UL case +++ lwu(tmp2, Address(str2)); +++ ld(tmp1, Address(str1)); +++ inflate_lo32(tmp3, tmp2); +++ mv(tmp2, tmp3); +++ } +++ bind(TAIL_CHECK); +++ xorr(tmp3, tmp1, tmp2); +++ beqz(tmp3, DONE); +++ +++ // Find the first different characters in the longwords and +++ // compute their difference. +++ bind(DIFFERENCE); +++ ctzc_bit(result, tmp3, isLL); // count zero from lsb to msb +++ srl(tmp1, tmp1, result); +++ srl(tmp2, tmp2, result); +++ if (isLL) { +++ andi(tmp1, tmp1, 0xFF); +++ andi(tmp2, tmp2, 0xFF); +++ } else { +++ andi(tmp1, tmp1, 0xFFFF); +++ andi(tmp2, tmp2, 0xFFFF); +++ } +++ sub(result, tmp1, tmp2); +++ j(DONE); +++ } +++ +++ bind(STUB); +++ RuntimeAddress stub = NULL; +++ switch (ae) { +++ case StrIntrinsicNode::LL: +++ stub = RuntimeAddress(StubRoutines::riscv::compare_long_string_LL()); +++ break; +++ case StrIntrinsicNode::UU: +++ stub = RuntimeAddress(StubRoutines::riscv::compare_long_string_UU()); +++ break; +++ case StrIntrinsicNode::LU: +++ stub = RuntimeAddress(StubRoutines::riscv::compare_long_string_LU()); +++ break; +++ case StrIntrinsicNode::UL: +++ stub = RuntimeAddress(StubRoutines::riscv::compare_long_string_UL()); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ assert(stub.target() != NULL, "compare_long_string stub has not been generated"); +++ trampoline_call(stub); +++ j(DONE); +++ +++ bind(SHORT_STRING); +++ // Is the minimum length zero? +++ beqz(cnt2, DONE); +++ // arrange code to do most branches while loading and loading next characters +++ // while comparing previous +++ (this->*str1_load_chr)(tmp1, Address(str1), t0); +++ addi(str1, str1, str1_chr_size); +++ addi(cnt2, cnt2, -1); +++ beqz(cnt2, SHORT_LAST_INIT); +++ (this->*str2_load_chr)(cnt1, Address(str2), t0); +++ addi(str2, str2, str2_chr_size); +++ j(SHORT_LOOP_START); +++ bind(SHORT_LOOP); +++ addi(cnt2, cnt2, -1); +++ beqz(cnt2, SHORT_LAST); +++ bind(SHORT_LOOP_START); +++ (this->*str1_load_chr)(tmp2, Address(str1), t0); +++ addi(str1, str1, str1_chr_size); +++ (this->*str2_load_chr)(t0, Address(str2), t0); +++ addi(str2, str2, str2_chr_size); +++ bne(tmp1, cnt1, SHORT_LOOP_TAIL); +++ addi(cnt2, cnt2, -1); +++ beqz(cnt2, SHORT_LAST2); +++ (this->*str1_load_chr)(tmp1, Address(str1), t0); +++ addi(str1, str1, str1_chr_size); +++ (this->*str2_load_chr)(cnt1, Address(str2), t0); +++ addi(str2, str2, str2_chr_size); +++ beq(tmp2, t0, SHORT_LOOP); +++ sub(result, tmp2, t0); +++ j(DONE); +++ bind(SHORT_LOOP_TAIL); +++ sub(result, tmp1, cnt1); +++ j(DONE); +++ bind(SHORT_LAST2); +++ beq(tmp2, t0, DONE); +++ sub(result, tmp2, t0); +++ +++ j(DONE); +++ bind(SHORT_LAST_INIT); +++ (this->*str2_load_chr)(cnt1, Address(str2), t0); +++ addi(str2, str2, str2_chr_size); +++ bind(SHORT_LAST); +++ beq(tmp1, cnt1, DONE); +++ sub(result, tmp1, cnt1); +++ +++ bind(DONE); +++ +++ BLOCK_COMMENT("} string_compare"); +++} +++ +++void C2_MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, +++ Register tmp4, Register tmp5, Register tmp6, Register result, +++ Register cnt1, int elem_size) { +++ Label DONE, SAME, NEXT_DWORD, SHORT, TAIL, TAIL2, IS_TMP5_ZR; +++ Register tmp1 = t0; +++ Register tmp2 = t1; +++ Register cnt2 = tmp2; // cnt2 only used in array length compare +++ Register elem_per_word = tmp6; +++ int log_elem_size = 
exact_log2(elem_size);
+++ int length_offset = arrayOopDesc::length_offset_in_bytes();
+++ int base_offset = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
+++
+++ assert(elem_size == 1 || elem_size == 2, "must be char or byte");
+++ assert_different_registers(a1, a2, result, cnt1, t0, t1, tmp3, tmp4, tmp5, tmp6);
+++ mv(elem_per_word, wordSize / elem_size);
+++
+++ BLOCK_COMMENT("arrays_equals {");
+++
+++ // if (a1 == a2), return true
+++ beq(a1, a2, SAME);
+++
+++ mv(result, false);
+++ beqz(a1, DONE);
+++ beqz(a2, DONE);
+++ lwu(cnt1, Address(a1, length_offset));
+++ lwu(cnt2, Address(a2, length_offset));
+++ bne(cnt2, cnt1, DONE);
+++ beqz(cnt1, SAME);
+++
+++ slli(tmp5, cnt1, 3 + log_elem_size);
+++ sub(tmp5, zr, tmp5);
+++ add(a1, a1, base_offset);
+++ add(a2, a2, base_offset);
+++ ld(tmp3, Address(a1, 0));
+++ ld(tmp4, Address(a2, 0));
+++ ble(cnt1, elem_per_word, SHORT); // short or same
+++
+++ // Main 16 byte comparison loop with 2 exits
+++ bind(NEXT_DWORD); {
+++ ld(tmp1, Address(a1, wordSize));
+++ ld(tmp2, Address(a2, wordSize));
+++ sub(cnt1, cnt1, 2 * wordSize / elem_size);
+++ blez(cnt1, TAIL);
+++ bne(tmp3, tmp4, DONE);
+++ ld(tmp3, Address(a1, 2 * wordSize));
+++ ld(tmp4, Address(a2, 2 * wordSize));
+++ add(a1, a1, 2 * wordSize);
+++ add(a2, a2, 2 * wordSize);
+++ ble(cnt1, elem_per_word, TAIL2);
+++ } beq(tmp1, tmp2, NEXT_DWORD);
+++ j(DONE);
+++
+++ bind(TAIL);
+++ xorr(tmp4, tmp3, tmp4);
+++ xorr(tmp2, tmp1, tmp2);
+++ sll(tmp2, tmp2, tmp5);
+++ orr(tmp5, tmp4, tmp2);
+++ j(IS_TMP5_ZR);
+++
+++ bind(TAIL2);
+++ bne(tmp1, tmp2, DONE);
+++
+++ bind(SHORT);
+++ xorr(tmp4, tmp3, tmp4);
+++ sll(tmp5, tmp4, tmp5);
+++
+++ bind(IS_TMP5_ZR);
+++ bnez(tmp5, DONE);
+++
+++ bind(SAME);
+++ mv(result, true);
+++ // That's it.
+++ bind(DONE);
+++
+++ BLOCK_COMMENT("} arrays_equals");
+++}
+++
+++// Compare Strings
+++
+++// For Strings we're passed the address of the first characters in a1
+++// and a2 and the length in cnt1.
+++// elem_size is the element size in bytes: either 1 or 2.
+++// There are two implementations. For arrays >= 8 bytes, all
+++// comparisons (including the final one, which may overlap) are
+++// performed 8 bytes at a time. For strings < 8 bytes, we compare a
+++// word, then a halfword, and then a byte.
+++
+++void C2_MacroAssembler::string_equals(Register a1, Register a2,
+++ Register result, Register cnt1, int elem_size)
+++{
+++ Label SAME, DONE, SHORT, NEXT_WORD;
+++ Register tmp1 = t0;
+++ Register tmp2 = t1;
+++
+++ assert(elem_size == 1 || elem_size == 2, "must be 2 or 1 byte");
+++ assert_different_registers(a1, a2, result, cnt1, t0, t1);
+++
+++ BLOCK_COMMENT("string_equals {");
+++
+++ mv(result, false);
+++
+++ // Check for short strings, i.e. smaller than wordSize.
+++ sub(cnt1, cnt1, wordSize);
+++ bltz(cnt1, SHORT);
+++
+++ // Main 8 byte comparison loop.
+++ bind(NEXT_WORD); {
+++ ld(tmp1, Address(a1, 0));
+++ add(a1, a1, wordSize);
+++ ld(tmp2, Address(a2, 0));
+++ add(a2, a2, wordSize);
+++ sub(cnt1, cnt1, wordSize);
+++ bne(tmp1, tmp2, DONE);
+++ } bgtz(cnt1, NEXT_WORD);
+++
+++ // Last longword. In the case where length == 4 we compare the
+++ // same longword twice, but that's still faster than another
+++ // conditional branch.
+++ // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
+++ // length == 4.
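+++ // Adding the now non-positive cnt1 backs the pointers up so the final
+++ // load compares exactly the last wordSize bytes, overlapping bytes the
+++ // loop already checked when the length is not a multiple of wordSize.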
+++ add(tmp1, a1, cnt1);
+++ ld(tmp1, Address(tmp1, 0));
+++ add(tmp2, a2, cnt1);
+++ ld(tmp2, Address(tmp2, 0));
+++ bne(tmp1, tmp2, DONE);
+++ j(SAME);
+++
+++ bind(SHORT);
+++ Label TAIL03, TAIL01;
+++
+++ // 0-7 bytes left.
+++ test_bit(t0, cnt1, 2);
+++ beqz(t0, TAIL03);
+++ {
+++ lwu(tmp1, Address(a1, 0));
+++ add(a1, a1, 4);
+++ lwu(tmp2, Address(a2, 0));
+++ add(a2, a2, 4);
+++ bne(tmp1, tmp2, DONE);
+++ }
+++
+++ bind(TAIL03);
+++ // 0-3 bytes left.
+++ test_bit(t0, cnt1, 1);
+++ beqz(t0, TAIL01);
+++ {
+++ lhu(tmp1, Address(a1, 0));
+++ add(a1, a1, 2);
+++ lhu(tmp2, Address(a2, 0));
+++ add(a2, a2, 2);
+++ bne(tmp1, tmp2, DONE);
+++ }
+++
+++ bind(TAIL01);
+++ if (elem_size == 1) { // Only needed when comparing 1-byte elements
+++ // 0-1 bytes left.
+++ test_bit(t0, cnt1, 0);
+++ beqz(t0, SAME);
+++ {
+++ lbu(tmp1, Address(a1, 0));
+++ lbu(tmp2, Address(a2, 0));
+++ bne(tmp1, tmp2, DONE);
+++ }
+++ }
+++
+++ // Strings are equal.
+++ bind(SAME);
+++ mv(result, true);
+++
+++ // That's it.
+++ bind(DONE);
+++ BLOCK_COMMENT("} string_equals");
+++}
+++
+++typedef void (Assembler::*conditional_branch_insn)(Register op1, Register op2, Label& label, bool is_far);
+++typedef void (MacroAssembler::*float_conditional_branch_insn)(FloatRegister op1, FloatRegister op2, Label& label,
+++ bool is_far, bool is_unordered);
+++
+++static conditional_branch_insn conditional_branches[] =
+++{
+++ /* SHORT branches */
+++ (conditional_branch_insn)&MacroAssembler::beq,
+++ (conditional_branch_insn)&MacroAssembler::bgt,
+++ NULL, // BoolTest::overflow
+++ (conditional_branch_insn)&MacroAssembler::blt,
+++ (conditional_branch_insn)&MacroAssembler::bne,
+++ (conditional_branch_insn)&MacroAssembler::ble,
+++ NULL, // BoolTest::no_overflow
+++ (conditional_branch_insn)&MacroAssembler::bge,
+++
+++ /* UNSIGNED branches */
+++ (conditional_branch_insn)&MacroAssembler::beq,
+++ (conditional_branch_insn)&MacroAssembler::bgtu,
+++ NULL,
+++ (conditional_branch_insn)&MacroAssembler::bltu,
+++ (conditional_branch_insn)&MacroAssembler::bne,
+++ (conditional_branch_insn)&MacroAssembler::bleu,
+++ NULL,
+++ (conditional_branch_insn)&MacroAssembler::bgeu
+++};
+++
+++static float_conditional_branch_insn float_conditional_branches[] =
+++{
+++ /* FLOAT SHORT branches */
+++ (float_conditional_branch_insn)&MacroAssembler::float_beq,
+++ (float_conditional_branch_insn)&MacroAssembler::float_bgt,
+++ NULL, // BoolTest::overflow
+++ (float_conditional_branch_insn)&MacroAssembler::float_blt,
+++ (float_conditional_branch_insn)&MacroAssembler::float_bne,
+++ (float_conditional_branch_insn)&MacroAssembler::float_ble,
+++ NULL, // BoolTest::no_overflow
+++ (float_conditional_branch_insn)&MacroAssembler::float_bge,
+++
+++ /* DOUBLE SHORT branches */
+++ (float_conditional_branch_insn)&MacroAssembler::double_beq,
+++ (float_conditional_branch_insn)&MacroAssembler::double_bgt,
+++ NULL,
+++ (float_conditional_branch_insn)&MacroAssembler::double_blt,
+++ (float_conditional_branch_insn)&MacroAssembler::double_bne,
+++ (float_conditional_branch_insn)&MacroAssembler::double_ble,
+++ NULL,
+++ (float_conditional_branch_insn)&MacroAssembler::double_bge
+++};
+++
+++void C2_MacroAssembler::cmp_branch(int cmpFlag, Register op1, Register op2, Label& label, bool is_far) {
+++ assert(cmpFlag >= 0 && cmpFlag < (int)(sizeof(conditional_branches) / sizeof(conditional_branches[0])),
+++ "invalid conditional branch index");
+++ (this->*conditional_branches[cmpFlag])(op1, op2, label, is_far);
+++}
+++
+++// This function should only be used by C2. It flips the unordered bit when the condition is
+++// unordered-greater, because C2 uses unordered-lesser instead of unordered-greater. Finally, the result bits are commuted at function do_one_bytecode().
+++void C2_MacroAssembler::float_cmp_branch(int cmpFlag, FloatRegister op1, FloatRegister op2, Label& label, bool is_far) {
+++ assert(cmpFlag >= 0 && cmpFlag < (int)(sizeof(float_conditional_branches) / sizeof(float_conditional_branches[0])),
+++ "invalid float conditional branch index");
+++ int booltest_flag = cmpFlag & ~(C2_MacroAssembler::double_branch_mask);
+++ (this->*float_conditional_branches[cmpFlag])(op1, op2, label, is_far,
+++ (booltest_flag == (BoolTest::ge) || booltest_flag == (BoolTest::gt)) ? false : true);
+++}
+++
+++void C2_MacroAssembler::enc_cmpUEqNeLeGt_imm0_branch(int cmpFlag, Register op1, Label& L, bool is_far) {
+++ switch (cmpFlag) {
+++ case BoolTest::eq:
+++ case BoolTest::le:
+++ beqz(op1, L, is_far);
+++ break;
+++ case BoolTest::ne:
+++ case BoolTest::gt:
+++ bnez(op1, L, is_far);
+++ break;
+++ default:
+++ ShouldNotReachHere();
+++ }
+++}
+++
+++void C2_MacroAssembler::enc_cmpEqNe_imm0_branch(int cmpFlag, Register op1, Label& L, bool is_far) {
+++ switch (cmpFlag) {
+++ case BoolTest::eq:
+++ beqz(op1, L, is_far);
+++ break;
+++ case BoolTest::ne:
+++ bnez(op1, L, is_far);
+++ break;
+++ default:
+++ ShouldNotReachHere();
+++ }
+++}
+++
+++void C2_MacroAssembler::enc_cmove(int cmpFlag, Register op1, Register op2, Register dst, Register src) {
+++ Label L;
+++ cmp_branch(cmpFlag ^ (1 << neg_cond_bits), op1, op2, L);
+++ mv(dst, src);
+++ bind(L);
+++}
+++
+++// Set dst to NaN if any NaN input.
+++void C2_MacroAssembler::minmax_FD(FloatRegister dst, FloatRegister src1, FloatRegister src2,
+++ bool is_double, bool is_min) {
+++ assert_different_registers(dst, src1, src2);
+++
+++ Label Done, Compare;
+++
+++ is_double ? fclass_d(t0, src1)
+++ : fclass_s(t0, src1);
+++ is_double ? fclass_d(t1, src2)
+++ : fclass_s(t1, src2);
+++ orr(t0, t0, t1);
+++ andi(t0, t0, 0b1100000000); // if src1 or src2 is a quiet or signaling NaN then return NaN
+++ beqz(t0, Compare);
+++ is_double ? fadd_d(dst, src1, src2)
+++ : fadd_s(dst, src1, src2);
+++ j(Done);
+++
+++ bind(Compare);
+++ if (is_double) {
+++ is_min ? fmin_d(dst, src1, src2)
+++ : fmax_d(dst, src1, src2);
+++ } else {
+++ is_min ? fmin_s(dst, src1, src2)
+++ : fmax_s(dst, src1, src2);
+++ }
+++
+++ bind(Done);
+++}
+++
+++void C2_MacroAssembler::element_compare(Register a1, Register a2, Register result, Register cnt, Register tmp1, Register tmp2,
+++ VectorRegister vr1, VectorRegister vr2, VectorRegister vrs, bool islatin, Label &DONE) {
+++ Label loop;
+++ Assembler::SEW sew = islatin ?
Assembler::e8 : Assembler::e16;
+++
+++ bind(loop);
+++ vsetvli(tmp1, cnt, sew, Assembler::m2);
+++ vlex_v(vr1, a1, sew);
+++ vlex_v(vr2, a2, sew);
+++ vmsne_vv(vrs, vr1, vr2);
+++ vfirst_m(tmp2, vrs);
+++ bgez(tmp2, DONE);
+++ sub(cnt, cnt, tmp1);
+++ if (!islatin) {
+++ slli(tmp1, tmp1, 1); // get byte counts
+++ }
+++ add(a1, a1, tmp1);
+++ add(a2, a2, tmp1);
+++ bnez(cnt, loop);
+++
+++ mv(result, true);
+++}
+++
+++void C2_MacroAssembler::string_equals_v(Register a1, Register a2, Register result, Register cnt, int elem_size) {
+++ Label DONE;
+++ Register tmp1 = t0;
+++ Register tmp2 = t1;
+++
+++ BLOCK_COMMENT("string_equals_v {");
+++
+++ mv(result, false);
+++
+++ if (elem_size == 2) {
+++ srli(cnt, cnt, 1);
+++ }
+++
+++ element_compare(a1, a2, result, cnt, tmp1, tmp2, v0, v2, v0, elem_size == 1, DONE);
+++
+++ bind(DONE);
+++ BLOCK_COMMENT("} string_equals_v");
+++}
+++
+++// used by C2 ClearArray patterns.
+++// base: Address of a buffer to be zeroed
+++// cnt: Count in HeapWords
+++//
+++// base, cnt, v0, v1 and t0 are clobbered.
+++void C2_MacroAssembler::clear_array_v(Register base, Register cnt) {
+++ Label loop;
+++
+++ // making zero words
+++ vsetvli(t0, cnt, Assembler::e64, Assembler::m4);
+++ vxor_vv(v0, v0, v0);
+++
+++ bind(loop);
+++ vsetvli(t0, cnt, Assembler::e64, Assembler::m4);
+++ vse64_v(v0, base);
+++ sub(cnt, cnt, t0);
+++ shadd(base, t0, base, t0, 3);
+++ bnez(cnt, loop);
+++}
+++
+++void C2_MacroAssembler::arrays_equals_v(Register a1, Register a2, Register result,
+++ Register cnt1, int elem_size) {
+++ Label DONE;
+++ Register tmp1 = t0;
+++ Register tmp2 = t1;
+++ Register cnt2 = tmp2;
+++ int length_offset = arrayOopDesc::length_offset_in_bytes();
+++ int base_offset = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
+++
+++ BLOCK_COMMENT("arrays_equals_v {");
+++
+++ // if (a1 == a2), return true
+++ mv(result, true);
+++ beq(a1, a2, DONE);
+++
+++ mv(result, false);
+++ // if a1 == null or a2 == null, return false
+++ beqz(a1, DONE);
+++ beqz(a2, DONE);
+++ // if (a1.length != a2.length), return false
+++ lwu(cnt1, Address(a1, length_offset));
+++ lwu(cnt2, Address(a2, length_offset));
+++ bne(cnt1, cnt2, DONE);
+++
+++ la(a1, Address(a1, base_offset));
+++ la(a2, Address(a2, base_offset));
+++
+++ element_compare(a1, a2, result, cnt1, tmp1, tmp2, v0, v2, v0, elem_size == 1, DONE);
+++
+++ bind(DONE);
+++
+++ BLOCK_COMMENT("} arrays_equals_v");
+++}
+++
+++void C2_MacroAssembler::string_compare_v(Register str1, Register str2, Register cnt1, Register cnt2,
+++ Register result, Register tmp1, Register tmp2, int encForm) {
+++ Label DIFFERENCE, DONE, L, loop;
+++ bool encLL = encForm == StrIntrinsicNode::LL;
+++ bool encLU = encForm == StrIntrinsicNode::LU;
+++ bool encUL = encForm == StrIntrinsicNode::UL;
+++
+++ bool str1_isL = encLL || encLU;
+++ bool str2_isL = encLL || encUL;
+++
+++ int minCharsInWord = encLL ? wordSize : wordSize / 2;
+++
+++ BLOCK_COMMENT("string_compare {");
+++
+++ // for Latin1 strings, 1 byte for 1 character
+++ // for UTF16 strings, 2 bytes for 1 character
+++ if (!str1_isL)
+++ sraiw(cnt1, cnt1, 1);
+++ if (!str2_isL)
+++ sraiw(cnt2, cnt2, 1);
+++
+++ // if str1 == str2, return the difference
+++ // save the minimum of the string lengths in cnt2.
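+++ // (E.g. comparing "ab" with "abc": the first min(2, 3) = 2 characters are
+++ // equal, so the precomputed length difference 2 - 3 = -1 becomes the
+++ // result, matching String.compareTo semantics.)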
+++ sub(result, cnt1, cnt2);
+++ bgt(cnt1, cnt2, L);
+++ mv(cnt2, cnt1);
+++ bind(L);
+++
+++ if (str1_isL == str2_isL) { // LL or UU
+++ element_compare(str1, str2, zr, cnt2, tmp1, tmp2, v2, v4, v1, encLL, DIFFERENCE);
+++ j(DONE);
+++ } else { // LU or UL
+++ Register strL = encLU ? str1 : str2;
+++ Register strU = encLU ? str2 : str1;
+++ VectorRegister vstr1 = encLU ? v4 : v0;
+++ VectorRegister vstr2 = encLU ? v0 : v4;
+++
+++ bind(loop);
+++ vsetvli(tmp1, cnt2, Assembler::e8, Assembler::m2);
+++ vle8_v(vstr1, strL);
+++ vsetvli(tmp1, cnt2, Assembler::e16, Assembler::m4);
+++ vzext_vf2(vstr2, vstr1);
+++ vle16_v(vstr1, strU);
+++ vmsne_vv(v0, vstr2, vstr1);
+++ vfirst_m(tmp2, v0);
+++ bgez(tmp2, DIFFERENCE);
+++ sub(cnt2, cnt2, tmp1);
+++ add(strL, strL, tmp1);
+++ shadd(strU, tmp1, strU, tmp1, 1);
+++ bnez(cnt2, loop);
+++ j(DONE);
+++ }
+++ bind(DIFFERENCE);
+++ slli(tmp1, tmp2, 1);
+++ add(str1, str1, str1_isL ? tmp2 : tmp1);
+++ add(str2, str2, str2_isL ? tmp2 : tmp1);
+++ str1_isL ? lbu(tmp1, Address(str1, 0)) : lhu(tmp1, Address(str1, 0));
+++ str2_isL ? lbu(tmp2, Address(str2, 0)) : lhu(tmp2, Address(str2, 0));
+++ sub(result, tmp1, tmp2);
+++
+++ bind(DONE);
+++}
+++
+++void C2_MacroAssembler::byte_array_inflate_v(Register src, Register dst, Register len, Register tmp) {
+++ Label loop;
+++ assert_different_registers(src, dst, len, tmp, t0);
+++
+++ BLOCK_COMMENT("byte_array_inflate_v {");
+++ bind(loop);
+++ vsetvli(tmp, len, Assembler::e8, Assembler::m2);
+++ vle8_v(v2, src);
+++ vsetvli(t0, len, Assembler::e16, Assembler::m4);
+++ vzext_vf2(v0, v2);
+++ vse16_v(v0, dst);
+++ sub(len, len, tmp);
+++ add(src, src, tmp);
+++ shadd(dst, tmp, dst, tmp, 1);
+++ bnez(len, loop);
+++ BLOCK_COMMENT("} byte_array_inflate_v");
+++}
+++
+++// Compress char[] array to byte[].
+++// result: the array length if every element in array can be encoded; 0, otherwise.
+++void C2_MacroAssembler::char_array_compress_v(Register src, Register dst, Register len, Register result, Register tmp) {
+++ Label done;
+++ encode_iso_array_v(src, dst, len, result, tmp);
+++ beqz(len, done);
+++ mv(result, zr);
+++ bind(done);
+++}
+++
+++// result: the number of elements that have been encoded.
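+++// (E.g. for input chars { 'a', 'b', 0x3b1, 'c' } only "ab" is stored to dst
+++// and result is 2: encoding stops at the first element above 0xff.)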
+++void C2_MacroAssembler::encode_iso_array_v(Register src, Register dst, Register len, Register result, Register tmp) { +++ Label loop, DIFFERENCE, DONE; +++ +++ BLOCK_COMMENT("encode_iso_array_v {"); +++ mv(result, 0); +++ +++ bind(loop); +++ mv(tmp, 0xff); +++ vsetvli(t0, len, Assembler::e16, Assembler::m2); +++ vle16_v(v2, src); +++ // if element > 0xff, stop +++ vmsgtu_vx(v1, v2, tmp); +++ vfirst_m(tmp, v1); +++ vmsbf_m(v0, v1); +++ // compress char to byte +++ vsetvli(t0, len, Assembler::e8); +++ vncvt_x_x_w(v1, v2, Assembler::v0_t); +++ vse8_v(v1, dst, Assembler::v0_t); +++ +++ bgez(tmp, DIFFERENCE); +++ add(result, result, t0); +++ add(dst, dst, t0); +++ sub(len, len, t0); +++ shadd(src, t0, src, t0, 1); +++ bnez(len, loop); +++ j(DONE); +++ +++ bind(DIFFERENCE); +++ add(result, result, tmp); +++ +++ bind(DONE); +++ BLOCK_COMMENT("} encode_iso_array_v"); +++} +++ +++void C2_MacroAssembler::has_negatives_v(Register ary, Register len, +++ Register result, Register tmp) { +++ Label LOOP, DONE; +++ +++ BLOCK_COMMENT("has_negatives_v {"); +++ assert_different_registers(ary, len, result, tmp); +++ +++ mv(result, true); +++ +++ bind(LOOP); +++ vsetvli(t0, len, Assembler::e8, Assembler::m4); +++ vle8_v(v0, ary); +++ vmslt_vx(v0, v0, zr); +++ vfirst_m(tmp, v0); +++ bgez(tmp, DONE); +++ +++ sub(len, len, t0); +++ add(ary, ary, t0); +++ bnez(len, LOOP); +++ mv(result, false); +++ +++ bind(DONE); +++ BLOCK_COMMENT("} has_negatives_v"); +++} +++ +++void C2_MacroAssembler::string_indexof_char_v(Register str1, Register cnt1, +++ Register ch, Register result, +++ Register tmp1, Register tmp2, +++ bool isL) { +++ mv(result, zr); +++ +++ Label loop, MATCH, DONE; +++ Assembler::SEW sew = isL ? Assembler::e8 : Assembler::e16; +++ bind(loop); +++ vsetvli(tmp1, cnt1, sew, Assembler::m4); +++ vlex_v(v0, str1, sew); +++ vmseq_vx(v0, v0, ch); +++ vfirst_m(tmp2, v0); +++ bgez(tmp2, MATCH); // if equal, return index +++ +++ add(result, result, tmp1); +++ sub(cnt1, cnt1, tmp1); +++ if (!isL) slli(tmp1, tmp1, 1); +++ add(str1, str1, tmp1); +++ bnez(cnt1, loop); +++ +++ mv(result, -1); +++ j(DONE); +++ +++ bind(MATCH); +++ add(result, result, tmp2); +++ +++ bind(DONE); +++} +++ +++// Set dst to NaN if any NaN input. +++void C2_MacroAssembler::minmax_FD_v(VectorRegister dst, VectorRegister src1, VectorRegister src2, +++ bool is_double, bool is_min) { +++ assert_different_registers(dst, src1, src2); +++ +++ vsetvli(t0, x0, is_double ? Assembler::e64 : Assembler::e32); +++ +++ is_min ? vfmin_vv(dst, src1, src2) +++ : vfmax_vv(dst, src1, src2); +++ +++ vmfne_vv(v0, src1, src1); +++ vfadd_vv(dst, src1, src1, Assembler::v0_t); +++ vmfne_vv(v0, src2, src2); +++ vfadd_vv(dst, src2, src2, Assembler::v0_t); +++} +++ +++// Set dst to NaN if any NaN input. +++void C2_MacroAssembler::reduce_minmax_FD_v(FloatRegister dst, +++ FloatRegister src1, VectorRegister src2, +++ VectorRegister tmp1, VectorRegister tmp2, +++ bool is_double, bool is_min) { +++ assert_different_registers(src2, tmp1, tmp2); +++ +++ Label L_done, L_NaN; +++ vsetvli(t0, x0, is_double ? Assembler::e64 : Assembler::e32); +++ vfmv_s_f(tmp2, src1); +++ +++ is_min ? 
vfredmin_vs(tmp1, src2, tmp2) +++ : vfredmax_vs(tmp1, src2, tmp2); +++ +++ fsflags(zr); +++ // Checking NaNs +++ vmflt_vf(tmp2, src2, src1); +++ frflags(t0); +++ bnez(t0, L_NaN); +++ j(L_done); +++ +++ bind(L_NaN); +++ vfmv_s_f(tmp2, src1); +++ vfredusum_vs(tmp1, src2, tmp2); +++ +++ bind(L_done); +++ vfmv_f_s(dst, tmp1); +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp ++@@ -0,0 +1,193 @@ +++/* +++ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_C2_MACROASSEMBLER_RISCV_HPP +++#define CPU_RISCV_C2_MACROASSEMBLER_RISCV_HPP +++ +++// C2_MacroAssembler contains high-level macros for C2 +++ +++ private: +++ void element_compare(Register r1, Register r2, +++ Register result, Register cnt, +++ Register tmp1, Register tmp2, +++ VectorRegister vr1, VectorRegister vr2, +++ VectorRegister vrs, +++ bool is_latin, Label& DONE); +++ public: +++ +++ void string_compare(Register str1, Register str2, +++ Register cnt1, Register cnt2, Register result, +++ Register tmp1, Register tmp2, Register tmp3, +++ int ae); +++ +++ void string_indexof_char_short(Register str1, Register cnt1, +++ Register ch, Register result, +++ bool isL); +++ +++ void string_indexof_char(Register str1, Register cnt1, +++ Register ch, Register result, +++ Register tmp1, Register tmp2, +++ Register tmp3, Register tmp4, +++ bool isL); +++ +++ void string_indexof(Register str1, Register str2, +++ Register cnt1, Register cnt2, +++ Register tmp1, Register tmp2, +++ Register tmp3, Register tmp4, +++ Register tmp5, Register tmp6, +++ Register result, int ae); +++ +++ void string_indexof_linearscan(Register haystack, Register needle, +++ Register haystack_len, Register needle_len, +++ Register tmp1, Register tmp2, +++ Register tmp3, Register tmp4, +++ int needle_con_cnt, Register result, int ae); +++ +++ void arrays_equals(Register r1, Register r2, +++ Register tmp3, Register tmp4, +++ Register tmp5, Register tmp6, +++ Register result, Register cnt1, +++ int elem_size); +++ +++ void string_equals(Register r1, Register r2, +++ Register result, Register cnt1, +++ int elem_size); +++ +++ // refer to conditional_branches and float_conditional_branches +++ static const int bool_test_bits = 3; +++ static const int neg_cond_bits = 2; +++ static const int unsigned_branch_mask = 1 << bool_test_bits; +++ static const int double_branch_mask = 1 << bool_test_bits; +++ +++ // 
cmp +++ void cmp_branch(int cmpFlag, +++ Register op1, Register op2, +++ Label& label, bool is_far = false); +++ +++ void float_cmp_branch(int cmpFlag, +++ FloatRegister op1, FloatRegister op2, +++ Label& label, bool is_far = false); +++ +++ void enc_cmpUEqNeLeGt_imm0_branch(int cmpFlag, Register op, +++ Label& L, bool is_far = false); +++ +++ void enc_cmpEqNe_imm0_branch(int cmpFlag, Register op, +++ Label& L, bool is_far = false); +++ +++ void enc_cmove(int cmpFlag, +++ Register op1, Register op2, +++ Register dst, Register src); +++ +++ void spill(Register r, bool is64, int offset) { +++ is64 ? sd(r, Address(sp, offset)) +++ : sw(r, Address(sp, offset)); +++ } +++ +++ void spill(FloatRegister f, bool is64, int offset) { +++ is64 ? fsd(f, Address(sp, offset)) +++ : fsw(f, Address(sp, offset)); +++ } +++ +++ void spill(VectorRegister v, int offset) { +++ add(t0, sp, offset); +++ vs1r_v(v, t0); +++ } +++ +++ void unspill(Register r, bool is64, int offset) { +++ is64 ? ld(r, Address(sp, offset)) +++ : lw(r, Address(sp, offset)); +++ } +++ +++ void unspillu(Register r, bool is64, int offset) { +++ is64 ? ld(r, Address(sp, offset)) +++ : lwu(r, Address(sp, offset)); +++ } +++ +++ void unspill(FloatRegister f, bool is64, int offset) { +++ is64 ? fld(f, Address(sp, offset)) +++ : flw(f, Address(sp, offset)); +++ } +++ +++ void unspill(VectorRegister v, int offset) { +++ add(t0, sp, offset); +++ vl1re8_v(v, t0); +++ } +++ +++ void spill_copy_vector_stack_to_stack(int src_offset, int dst_offset, int vec_reg_size_in_bytes) { +++ assert(vec_reg_size_in_bytes % 16 == 0, "unexpected vector reg size"); +++ unspill(v0, src_offset); +++ spill(v0, dst_offset); +++ } +++ +++ void minmax_FD(FloatRegister dst, +++ FloatRegister src1, FloatRegister src2, +++ bool is_double, bool is_min); +++ +++ // intrinsic methods implemented by rvv instructions +++ void string_equals_v(Register r1, Register r2, +++ Register result, Register cnt1, +++ int elem_size); +++ +++ void arrays_equals_v(Register r1, Register r2, +++ Register result, Register cnt1, +++ int elem_size); +++ +++ void string_compare_v(Register str1, Register str2, +++ Register cnt1, Register cnt2, +++ Register result, +++ Register tmp1, Register tmp2, +++ int encForm); +++ +++ void clear_array_v(Register base, Register cnt); +++ +++ void byte_array_inflate_v(Register src, Register dst, +++ Register len, Register tmp); +++ +++ void char_array_compress_v(Register src, Register dst, +++ Register len, Register result, +++ Register tmp); +++ +++ void encode_iso_array_v(Register src, Register dst, +++ Register len, Register result, +++ Register tmp); +++ +++ void has_negatives_v(Register ary, Register len, +++ Register result, Register tmp); +++ +++ void string_indexof_char_v(Register str1, Register cnt1, +++ Register ch, Register result, +++ Register tmp1, Register tmp2, +++ bool isL); +++ +++ void minmax_FD_v(VectorRegister dst, +++ VectorRegister src1, VectorRegister src2, +++ bool is_double, bool is_min); +++ +++ void reduce_minmax_FD_v(FloatRegister dst, +++ FloatRegister src1, VectorRegister src2, +++ VectorRegister tmp1, VectorRegister tmp2, +++ bool is_double, bool is_min); +++ +++#endif // CPU_RISCV_C2_MACROASSEMBLER_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c2_globals_riscv.hpp ++@@ -0,0 +1,85 @@ +++/* +++ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef CPU_RISCV_C2_GLOBALS_RISCV_HPP
+++#define CPU_RISCV_C2_GLOBALS_RISCV_HPP
+++
+++#include "utilities/globalDefinitions.hpp"
+++#include "utilities/macros.hpp"
+++
+++// Sets the default values for platform dependent flags used by the server compiler.
+++// (see c2_globals.hpp). Alpha-sorted.
+++
+++define_pd_global(bool, BackgroundCompilation, true);
+++define_pd_global(bool, CICompileOSR, true);
+++define_pd_global(bool, InlineIntrinsics, true);
+++define_pd_global(bool, PreferInterpreterNativeStubs, false);
+++define_pd_global(bool, ProfileTraps, true);
+++define_pd_global(bool, UseOnStackReplacement, true);
+++define_pd_global(bool, ProfileInterpreter, true);
+++define_pd_global(bool, TieredCompilation, COMPILER1_PRESENT(true) NOT_COMPILER1(false));
+++define_pd_global(intx, CompileThreshold, 10000);
+++
+++define_pd_global(intx, OnStackReplacePercentage, 140);
+++define_pd_global(intx, ConditionalMoveLimit, 0);
+++define_pd_global(intx, FLOATPRESSURE, 32);
+++define_pd_global(intx, FreqInlineSize, 325);
+++define_pd_global(intx, MinJumpTableSize, 10);
+++define_pd_global(intx, INTPRESSURE, 24);
+++define_pd_global(intx, InteriorEntryAlignment, 16);
+++define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
+++define_pd_global(intx, LoopUnrollLimit, 60);
+++define_pd_global(intx, LoopPercentProfileLimit, 10);
+++// InitialCodeCacheSize derived from specjbb2000 run.
+++define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
+++define_pd_global(intx, CodeCacheExpansionSize, 64*K);
+++
+++// Ergonomics related flags
+++define_pd_global(uint64_t, MaxRAM, 128ULL*G);
+++define_pd_global(intx, RegisterCostAreaRatio, 16000);
+++
+++// Peephole and CISC spilling both break the graph, and so make the
+++// scheduler sick.
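+++// Accordingly, OptoPeephole and UseCISCSpill are both disabled below,
+++// while instruction scheduling itself (OptoScheduling) remains enabled.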
+++define_pd_global(bool, OptoPeephole, false); +++define_pd_global(bool, UseCISCSpill, false); +++define_pd_global(bool, OptoScheduling, true); +++define_pd_global(bool, OptoBundling, false); +++define_pd_global(bool, OptoRegScheduling, false); +++define_pd_global(bool, SuperWordLoopUnrollAnalysis, true); +++define_pd_global(bool, IdealizeClearArrayNode, true); +++ +++define_pd_global(intx, ReservedCodeCacheSize, 48*M); +++define_pd_global(intx, NonProfiledCodeHeapSize, 21*M); +++define_pd_global(intx, ProfiledCodeHeapSize, 22*M); +++define_pd_global(intx, NonNMethodCodeHeapSize, 5*M ); +++define_pd_global(uintx, CodeCacheMinBlockLength, 6); +++define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); +++ +++// Ergonomics related flags +++define_pd_global(bool, NeverActAsServerClassMachine, false); +++ +++define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed. +++ +++#endif // CPU_RISCV_C2_GLOBALS_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c2_init_riscv.cpp ++@@ -0,0 +1,38 @@ +++/* +++ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "opto/compile.hpp" +++#include "opto/node.hpp" +++ +++// processor dependent initialization for riscv +++ +++extern void reg_mask_init(); +++ +++void Compile::pd_compiler2_init() { +++ guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" ); +++ reg_mask_init(); +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/c2_safepointPollStubTable_riscv.cpp ++@@ -0,0 +1,48 @@ +++/* +++ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). 
+++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.hpp" +++#include "opto/compile.hpp" +++#include "opto/node.hpp" +++#include "opto/output.hpp" +++#include "runtime/sharedRuntime.hpp" +++ +++#define __ masm. +++void C2SafepointPollStubTable::emit_stub_impl(MacroAssembler& masm, C2SafepointPollStub* entry) const { +++ assert(SharedRuntime::polling_page_return_handler_blob() != NULL, +++ "polling page return stub not created yet"); +++ address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point(); +++ RuntimeAddress callback_addr(stub); +++ +++ __ bind(entry->_stub_label); +++ InternalAddress safepoint_pc(__ pc() - __ offset() + entry->_safepoint_offset); +++ __ relocate(safepoint_pc.rspec(), [&] { +++ __ la(t0, safepoint_pc.target()); +++ }); +++ __ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset())); +++ __ far_jump(callback_addr); +++} +++#undef __ ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/codeBuffer_riscv.hpp ++@@ -0,0 +1,36 @@ +++/* +++ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_CODEBUFFER_RISCV_HPP +++#define CPU_RISCV_CODEBUFFER_RISCV_HPP +++ +++private: +++ void pd_initialize() {} +++ +++public: +++ void flush_bundle(bool start_new_bundle) {} +++ +++#endif // CPU_RISCV_CODEBUFFER_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/compiledIC_riscv.cpp ++@@ -0,0 +1,149 @@ +++/* +++ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. 
+++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.inline.hpp" +++#include "code/compiledIC.hpp" +++#include "code/icBuffer.hpp" +++#include "code/nmethod.hpp" +++#include "memory/resourceArea.hpp" +++#include "runtime/mutexLocker.hpp" +++#include "runtime/safepoint.hpp" +++ +++// ---------------------------------------------------------------------------- +++ +++#define __ _masm. +++address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) { +++ precond(cbuf.stubs()->start() != badAddress); +++ precond(cbuf.stubs()->end() != badAddress); +++ // Stub is fixed up when the corresponding call is converted from +++ // calling compiled code to calling interpreted code. +++ // mv xmethod, 0 +++ // jalr -4 # to self +++ +++ if (mark == NULL) { +++ mark = cbuf.insts_mark(); // Get mark within main instrs section. +++ } +++ +++ // Note that the code buffer's insts_mark is always relative to insts. +++ // That's why we must use the macroassembler to generate a stub. +++ MacroAssembler _masm(&cbuf); +++ +++ address base = __ start_a_stub(to_interp_stub_size()); +++ int offset = __ offset(); +++ if (base == NULL) { +++ return NULL; // CodeBuffer::expand failed +++ } +++ // static stub relocation stores the instruction address of the call +++ __ relocate(static_stub_Relocation::spec(mark)); +++ +++ __ emit_static_call_stub(); +++ +++ assert((__ offset() - offset) <= (int)to_interp_stub_size(), "stub too big"); +++ __ end_a_stub(); +++ return base; +++} +++#undef __ +++ +++int CompiledStaticCall::to_interp_stub_size() { +++ // (lui, addi, slli, addi, slli, addi) + (lui, addi, slli, addi, slli) + jalr +++ return 12 * NativeInstruction::instruction_size; +++} +++ +++int CompiledStaticCall::to_trampoline_stub_size() { +++ // Somewhat pessimistically, we count 4 instructions here (although +++ // there are only 3) because we sometimes emit an alignment nop. +++ // Trampoline stubs are always word aligned. +++ return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size; +++} +++ +++// Relocation entries for call stub, compiled java to interpreter. +++int CompiledStaticCall::reloc_to_interp_stub() { +++ return 4; // 3 in emit_to_interp_stub + 1 in emit_call +++} +++ +++void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) { +++ address stub = find_stub(); +++ guarantee(stub != NULL, "stub not found"); +++ +++ if (TraceICs) { +++ ResourceMark rm; +++ tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", +++ p2i(instruction_address()), +++ callee->name_and_sig_as_C_string()); +++ } +++ +++ // Creation also verifies the object. 
+++ NativeMovConstReg* method_holder +++ = nativeMovConstReg_at(stub); +++#ifdef ASSERT +++ NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address()); +++ +++ verify_mt_safe(callee, entry, method_holder, jump); +++#endif +++ // Update stub. +++ method_holder->set_data((intptr_t)callee()); +++ NativeGeneralJump::insert_unconditional(method_holder->next_instruction_address(), entry); +++ ICache::invalidate_range(stub, to_interp_stub_size()); +++ // Update jump to call. +++ set_destination_mt_safe(stub); +++} +++ +++void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { +++ // Reset stub. +++ address stub = static_stub->addr(); +++ assert(stub != NULL, "stub not found"); +++ assert(CompiledICLocker::is_safe(stub), "mt unsafe call"); +++ // Creation also verifies the object. +++ NativeMovConstReg* method_holder +++ = nativeMovConstReg_at(stub); +++ method_holder->set_data(0); +++ NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); +++ jump->set_jump_destination((address)-1); +++} +++ +++//----------------------------------------------------------------------------- +++// Non-product mode code +++#ifndef PRODUCT +++ +++void CompiledDirectStaticCall::verify() { +++ // Verify call. +++ _call->verify(); +++ _call->verify_alignment(); +++ +++ // Verify stub. +++ address stub = find_stub(); +++ assert(stub != NULL, "no stub found for static call"); +++ // Creation also verifies the object. +++ NativeMovConstReg* method_holder +++ = nativeMovConstReg_at(stub); +++ NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); +++ +++ // Verify state. +++ assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check"); +++} +++ +++#endif // !PRODUCT ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/copy_riscv.hpp ++@@ -0,0 +1,143 @@ +++/* +++ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#ifndef CPU_RISCV_COPY_RISCV_HPP +++#define CPU_RISCV_COPY_RISCV_HPP +++ +++#include OS_CPU_HEADER(copy) +++ +++static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) { +++ julong* to = (julong*) tohw; +++ julong v = ((julong) value << 32) | value; +++ while (count-- > 0) { +++ *to++ = v; +++ } +++} +++ +++static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) { +++ pd_fill_to_words(tohw, count, value); +++} +++ +++static void pd_fill_to_bytes(void* to, size_t count, jubyte value) { +++ (void)memset(to, value, count); +++} +++ +++static void pd_zero_to_words(HeapWord* tohw, size_t count) { +++ pd_fill_to_words(tohw, count, 0); +++} +++ +++static void pd_zero_to_bytes(void* to, size_t count) { +++ (void)memset(to, 0, count); +++} +++ +++static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { +++ (void)memmove(to, from, count * HeapWordSize); +++} +++ +++static inline void pd_disjoint_words_helper(const HeapWord* from, HeapWord* to, size_t count, bool is_atomic) { +++ switch (count) { +++ case 8: to[7] = from[7]; // fall through +++ case 7: to[6] = from[6]; // fall through +++ case 6: to[5] = from[5]; // fall through +++ case 5: to[4] = from[4]; // fall through +++ case 4: to[3] = from[3]; // fall through +++ case 3: to[2] = from[2]; // fall through +++ case 2: to[1] = from[1]; // fall through +++ case 1: to[0] = from[0]; // fall through +++ case 0: break; +++ default: +++ if (is_atomic) { +++ while (count-- > 0) { *to++ = *from++; } +++ } else { +++ memcpy(to, from, count * HeapWordSize); +++ } +++ } +++} +++ +++static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { +++ pd_disjoint_words_helper(from, to, count, false); +++} +++ +++static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { +++ pd_disjoint_words_helper(from, to, count, true); +++} +++ +++static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { +++ pd_conjoint_words(from, to, count); +++} +++ +++static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { +++ pd_disjoint_words(from, to, count); +++} +++ +++static void pd_conjoint_bytes(const void* from, void* to, size_t count) { +++ (void)memmove(to, from, count); +++} +++ +++static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { +++ pd_conjoint_bytes(from, to, count); +++} +++ +++static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { +++ _Copy_conjoint_jshorts_atomic(from, to, count); +++} +++ +++static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { +++ _Copy_conjoint_jints_atomic(from, to, count); +++} +++ +++static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { +++ _Copy_conjoint_jlongs_atomic(from, to, count); +++} +++ +++static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { +++ assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size."); +++ _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); +++} +++ +++static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { +++ _Copy_arrayof_conjoint_bytes(from, to, count); +++} +++ +++static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { +++ _Copy_arrayof_conjoint_jshorts(from, to, count); +++} +++ +++static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { 
+++ _Copy_arrayof_conjoint_jints(from, to, count); +++} +++ +++static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { +++ _Copy_arrayof_conjoint_jlongs(from, to, count); +++} +++ +++static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { +++ assert(!UseCompressedOops, "foo!"); +++ assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); +++ _Copy_arrayof_conjoint_jlongs(from, to, count); +++} +++ +++#endif // CPU_RISCV_COPY_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/disassembler_riscv.hpp ++@@ -0,0 +1,50 @@ +++/* +++ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_DISASSEMBLER_RISCV_HPP +++#define CPU_RISCV_DISASSEMBLER_RISCV_HPP +++ +++static int pd_instruction_alignment() { +++ return 1; +++} +++ +++static const char* pd_cpu_opts() { +++ return ""; +++} +++ +++// special-case instruction decoding. +++// There may be cases where the binutils disassembler doesn't do +++// the perfect job. In those cases, decode_instruction0 may kick in +++// and do it right. +++// If nothing had to be done, just return "here", otherwise return "here + instr_len(here)" +++static address decode_instruction0(address here, outputStream* st, address virtual_begin = NULL) { +++ return here; +++} +++ +++// platform-specific instruction annotations (like value of loaded constants) +++static void annotate(address pc, outputStream* st) {} +++ +++#endif // CPU_RISCV_DISASSEMBLER_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/foreign_globals_riscv.cpp ++@@ -0,0 +1,44 @@ +++/* +++ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "prims/foreign_globals.hpp" +++#include "utilities/debug.hpp" +++ +++// Stubbed out, implement later +++const ABIDescriptor ForeignGlobals::parse_abi_descriptor_impl(jobject jabi) const { +++ Unimplemented(); +++ return {}; +++} +++ +++const BufferLayout ForeignGlobals::parse_buffer_layout_impl(jobject jlayout) const { +++ Unimplemented(); +++ return {}; +++} +++ +++const CallRegs ForeignGlobals::parse_call_regs_impl(jobject jconv) const { +++ ShouldNotCallThis(); +++ return {}; +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/foreign_globals_riscv.hpp ++@@ -0,0 +1,32 @@ +++/* +++ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_FOREIGN_GLOBALS_RISCV_HPP +++#define CPU_RISCV_FOREIGN_GLOBALS_RISCV_HPP +++ +++class ABIDescriptor {}; +++class BufferLayout {}; +++ +++#endif // CPU_RISCV_FOREIGN_GLOBALS_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/frame_riscv.cpp ++@@ -0,0 +1,688 @@ +++/* +++ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). 
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#include "precompiled.hpp"
+++#include "compiler/oopMap.hpp"
+++#include "interpreter/interpreter.hpp"
+++#include "memory/resourceArea.hpp"
+++#include "memory/universe.hpp"
+++#include "oops/markWord.hpp"
+++#include "oops/method.hpp"
+++#include "oops/oop.inline.hpp"
+++#include "prims/methodHandles.hpp"
+++#include "runtime/frame.inline.hpp"
+++#include "runtime/handles.inline.hpp"
+++#include "runtime/javaCalls.hpp"
+++#include "runtime/monitorChunk.hpp"
+++#include "runtime/os.inline.hpp"
+++#include "runtime/signature.hpp"
+++#include "runtime/stackWatermarkSet.hpp"
+++#include "runtime/stubCodeGenerator.hpp"
+++#include "runtime/stubRoutines.hpp"
+++#include "vmreg_riscv.inline.hpp"
+++#ifdef COMPILER1
+++#include "c1/c1_Runtime1.hpp"
+++#include "runtime/vframeArray.hpp"
+++#endif
+++
+++#ifdef ASSERT
+++void RegisterMap::check_location_valid() {
+++}
+++#endif
+++
+++
+++// Profiling/safepoint support
+++
+++bool frame::safe_for_sender(JavaThread *thread) {
+++  address addr_sp = (address)_sp;
+++  address addr_fp = (address)_fp;
+++  address unextended_sp = (address)_unextended_sp;
+++
+++  // Consider stack guards when trying to determine "safe" stack pointers:
+++  // sp must be within the usable part of the stack (not in guards).
+++  if (!thread->is_in_usable_stack(addr_sp)) {
+++    return false;
+++  }
+++
+++  // When we are running interpreted code the machine stack pointer, SP, is
+++  // set low enough so that the Java expression stack can grow and shrink
+++  // without ever exceeding the machine stack bounds. So, ESP >= SP.
+++
+++  // When we call out of an interpreted method, SP is incremented so that
+++  // the space between SP and ESP is removed. The SP saved in the callee's
+++  // frame is the SP *before* this increment. So, when we walk a stack of
+++  // interpreter frames the sender's SP saved in a frame might be less than
+++  // the SP at the point of call.
+++
+++  // So the unextended sp must be within the stack, but we need not check
+++  // that unextended sp >= sp.
+++
+++  if (!thread->is_in_full_stack_checked(unextended_sp)) {
+++    return false;
+++  }
+++
+++  // fp must be within the stack and above (but not equal to) sp;
+++  // the second check, on fp plus an offset, handles the situation where fp is -1.
+++  bool fp_safe = thread->is_in_stack_range_excl(addr_fp, addr_sp) &&
+++                 thread->is_in_full_stack_checked(addr_fp + (return_addr_offset * sizeof(void*)));
+++
+++  // We know sp/unextended_sp are safe; only fp is questionable here.
+++
+++  // If the current frame is known to the code cache then we can attempt
+++  // to construct the sender and do some validation of it. This goes a long way
+++  // toward eliminating issues when we get into frame construction code.
+++
+++  if (_cb != NULL) {
+++
+++    // First check if the frame is complete and the tester is reliable.
+++    // Unfortunately we can only check frame completeness for runtime stubs
+++    // and nmethods; other generic buffer blobs are more problematic so we
+++    // just assume they are OK. Adapter blobs never have a complete frame
+++    // and are never OK.
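+++    // In other words: for the blob kinds we can actually test, an
+++    // incomplete frame means the activation is not fully set up yet and
+++    // cannot be walked safely; for any other blob kind we optimistically
+++    // fall through to the checks below.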
+++
+++    if (!_cb->is_frame_complete_at(_pc)) {
+++      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
+++        return false;
+++      }
+++    }
+++
+++    // Could just be some random pointer within the codeBlob.
+++    if (!_cb->code_contains(_pc)) {
+++      return false;
+++    }
+++
+++    // Entry frame checks
+++    if (is_entry_frame()) {
+++      // An entry frame must have a valid fp.
+++      return fp_safe && is_entry_frame_valid(thread);
+++    }
+++
+++    intptr_t* sender_sp = NULL;
+++    intptr_t* sender_unextended_sp = NULL;
+++    address sender_pc = NULL;
+++    intptr_t* saved_fp = NULL;
+++
+++    if (is_interpreted_frame()) {
+++      // fp must be safe
+++      if (!fp_safe) {
+++        return false;
+++      }
+++
+++      sender_pc = (address)this->fp()[return_addr_offset];
+++      // For interpreted frames, the value below is the sender "raw" sp,
+++      // which can be different from the sender unextended sp (the sp seen
+++      // by the sender) because of current frame local variables.
+++      sender_sp = (intptr_t*) addr_at(sender_sp_offset);
+++      sender_unextended_sp = (intptr_t*) this->fp()[interpreter_frame_sender_sp_offset];
+++      saved_fp = (intptr_t*) this->fp()[link_offset];
+++    } else {
+++      // Must be some sort of compiled/runtime frame;
+++      // fp does not have to be safe (although it could be checked for c1?).
+++
+++      // Check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc.
+++      if (_cb->frame_size() <= 0) {
+++        return false;
+++      }
+++
+++      sender_sp = _unextended_sp + _cb->frame_size();
+++      // Is sender_sp safe?
+++      if (!thread->is_in_full_stack_checked((address)sender_sp)) {
+++        return false;
+++      }
+++
+++      sender_unextended_sp = sender_sp;
+++      sender_pc = (address) *(sender_sp - 1);
+++      saved_fp = (intptr_t*) *(sender_sp - 2);
+++    }
+++
+++
+++    // If the potential sender is the interpreter then we can do some more checking.
+++    if (Interpreter::contains(sender_pc)) {
+++
+++      // fp is always saved in a recognizable place in any code we generate. However,
+++      // only if the sender is interpreted/call_stub (c1 too?) are we certain that the
+++      // saved fp is really a frame pointer.
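+++      // The range check below requires the candidate saved fp to lie
+++      // strictly between the sender sp and the stack base, i.e. inside
+++      // the sender's (or an older) frame.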
+++      if (!thread->is_in_stack_range_excl((address)saved_fp, (address)sender_sp)) {
+++        return false;
+++      }
+++
+++      // construct the potential sender
+++      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);
+++
+++      return sender.is_interpreted_frame_valid(thread);
+++    }
+++
+++    // We must always be able to find a recognizable pc.
+++    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
+++    if (sender_pc == NULL || sender_blob == NULL) {
+++      return false;
+++    }
+++
+++    // Could be a zombie method
+++    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
+++      return false;
+++    }
+++
+++    // Could just be some random pointer within the codeBlob
+++    if (!sender_blob->code_contains(sender_pc)) {
+++      return false;
+++    }
+++
+++    // We should never be able to see an adapter if the current frame is something from the code cache.
+++    if (sender_blob->is_adapter_blob()) {
+++      return false;
+++    }
+++
+++    // Could be the call_stub
+++    if (StubRoutines::returns_to_call_stub(sender_pc)) {
+++      if (!thread->is_in_stack_range_excl((address)saved_fp, (address)sender_sp)) {
+++        return false;
+++      }
+++
+++      // construct the potential sender
+++      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);
+++
+++      // Validate the JavaCallWrapper an entry frame must have.
+++      address jcw = (address)sender.entry_frame_call_wrapper();
+++
+++      bool jcw_safe = (jcw < thread->stack_base()) && (jcw > (address)sender.fp());
+++
+++      return jcw_safe;
+++    }
+++
+++    CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
+++    if (nm != NULL) {
+++      if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
+++          nm->method()->is_method_handle_intrinsic()) {
+++        return false;
+++      }
+++    }
+++
+++    // A frame size of 0 (or less) is bad, because every nmethod has a non-zero
+++    // frame size: the return address counts against the callee's frame.
+++    if (sender_blob->frame_size() <= 0) {
+++      assert(!sender_blob->is_compiled(), "should count return address at least");
+++      return false;
+++    }
+++
+++    // We should never be able to see anything here except an nmethod. If something in the
+++    // code cache (current frame) is called by an entity within the code cache, that entity
+++    // should not be anything but the call stub (already covered), the interpreter (already covered)
+++    // or an nmethod.
+++    if (!sender_blob->is_compiled()) {
+++      return false;
+++    }
+++
+++    // Could put some more validation for the potential non-interpreted sender
+++    // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
+++
+++    // One idea is seeing if the sender_pc we have is one that we'd expect to call to the current cb.
+++
+++    // We've validated the potential sender that would be created.
+++    return true;
+++  }
+++
+++  // Must be native-compiled frame.
Since sender will try and use fp to find +++ // linkages it must be safe +++ if (!fp_safe) { +++ return false; +++ } +++ +++ // Will the pc we fetch be non-zero (which we'll find at the oldest frame) +++ if ((address)this->fp()[return_addr_offset] == NULL) { return false; } +++ +++ return true; +++} +++ +++void frame::patch_pc(Thread* thread, address pc) { +++ assert(_cb == CodeCache::find_blob(pc), "unexpected pc"); +++ address* pc_addr = &(((address*) sp())[-1]); +++ if (TracePcPatching) { +++ tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]", +++ p2i(pc_addr), p2i(*pc_addr), p2i(pc)); +++ } +++ // Either the return address is the original one or we are going to +++ // patch in the same address that's already there. +++ assert(_pc == *pc_addr || pc == *pc_addr, "must be"); +++ *pc_addr = pc; +++ address original_pc = CompiledMethod::get_deopt_original_pc(this); +++ if (original_pc != NULL) { +++ assert(original_pc == _pc, "expected original PC to be stored before patching"); +++ _deopt_state = is_deoptimized; +++ // leave _pc as is +++ } else { +++ _deopt_state = not_deoptimized; +++ _pc = pc; +++ } +++} +++ +++bool frame::is_interpreted_frame() const { +++ return Interpreter::contains(pc()); +++} +++ +++int frame::frame_size(RegisterMap* map) const { +++ frame sender = this->sender(map); +++ return sender.sp() - sp(); +++} +++ +++intptr_t* frame::entry_frame_argument_at(int offset) const { +++ // convert offset to index to deal with tsi +++ int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize); +++ // Entry frame's arguments are always in relation to unextended_sp() +++ return &unextended_sp()[index]; +++} +++ +++// sender_sp +++intptr_t* frame::interpreter_frame_sender_sp() const { +++ assert(is_interpreted_frame(), "interpreted frame expected"); +++ return (intptr_t*) at(interpreter_frame_sender_sp_offset); +++} +++ +++void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) { +++ assert(is_interpreted_frame(), "interpreted frame expected"); +++ ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp); +++} +++ +++ +++// monitor elements +++ +++BasicObjectLock* frame::interpreter_frame_monitor_begin() const { +++ return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset); +++} +++ +++BasicObjectLock* frame::interpreter_frame_monitor_end() const { +++ BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset); +++ // make sure the pointer points inside the frame +++ assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer"); +++ assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer"); +++ return result; +++} +++ +++void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) { +++ *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value; +++} +++ +++// Used by template based interpreter deoptimization +++void frame::interpreter_frame_set_last_sp(intptr_t* last_sp) { +++ *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = last_sp; +++} +++ +++frame frame::sender_for_entry_frame(RegisterMap* map) const { +++ assert(map != NULL, "map must be set"); +++ // Java frame called from C; skip all C frames and return top C +++ // frame of that chunk as the sender +++ JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor(); +++ assert(!entry_frame_is_first(), "next Java fp must be non zero"); +++ assert(jfa->last_Java_sp() > sp(), "must be above this frame on 
stack"); +++ // Since we are walking the stack now this nested anchor is obviously walkable +++ // even if it wasn't when it was stacked. +++ jfa->make_walkable(); +++ map->clear(); +++ assert(map->include_argument_oops(), "should be set by clear"); +++ vmassert(jfa->last_Java_pc() != NULL, "not walkable"); +++ frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc()); +++ return fr; +++} +++ +++OptimizedEntryBlob::FrameData* OptimizedEntryBlob::frame_data_for_frame(const frame& frame) const { +++ ShouldNotCallThis(); +++ return nullptr; +++} +++ +++bool frame::optimized_entry_frame_is_first() const { +++ ShouldNotCallThis(); +++ return false; +++} +++ +++frame frame::sender_for_optimized_entry_frame(RegisterMap* map) const { +++ ShouldNotCallThis(); +++ return {}; +++} +++ +++//------------------------------------------------------------------------------ +++// frame::verify_deopt_original_pc +++// +++// Verifies the calculated original PC of a deoptimization PC for the +++// given unextended SP. +++#ifdef ASSERT +++void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) { +++ frame fr; +++ +++ // This is ugly but it's better than to change {get,set}_original_pc +++ // to take an SP value as argument. And it's only a debugging +++ // method anyway. +++ fr._unextended_sp = unextended_sp; +++ +++ assert_cond(nm != NULL); +++ address original_pc = nm->get_original_pc(&fr); +++ assert(nm->insts_contains_inclusive(original_pc), +++ "original PC must be in the main code section of the the compiled method (or must be immediately following it)"); +++} +++#endif +++ +++//------------------------------------------------------------------------------ +++// frame::adjust_unextended_sp +++void frame::adjust_unextended_sp() { +++ // On riscv, sites calling method handle intrinsics and lambda forms are treated +++ // as any other call site. Therefore, no special action is needed when we are +++ // returning to any of these call sites. +++ +++ if (_cb != NULL) { +++ CompiledMethod* sender_cm = _cb->as_compiled_method_or_null(); +++ if (sender_cm != NULL) { +++ // If the sender PC is a deoptimization point, get the original PC. +++ if (sender_cm->is_deopt_entry(_pc) || +++ sender_cm->is_deopt_mh_entry(_pc)) { +++ DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp)); +++ } +++ } +++ } +++} +++ +++//------------------------------------------------------------------------------ +++// frame::update_map_with_saved_link +++void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) { +++ // The interpreter and compiler(s) always save fp in a known +++ // location on entry. We must record where that location is +++ // so that if fp was live on callout from c2 we can find +++ // the saved copy no matter what it called. +++ +++ // Since the interpreter always saves fp if we record where it is then +++ // we don't have to always save fp on entry and exit to c2 compiled +++ // code, on entry will be enough. +++ assert(map != NULL, "map must be set"); +++ map->set_location(::fp->as_VMReg(), (address) link_addr); +++ // this is weird "H" ought to be at a higher address however the +++ // oopMaps seems to have the "H" regs at the same address and the +++ // vanilla register. 
+++  map->set_location(::fp->as_VMReg()->next(), (address) link_addr);
+++}
+++
+++
+++//------------------------------------------------------------------------------
+++// frame::sender_for_interpreter_frame
+++frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
+++  // SP is the raw SP from the sender after adapter or interpreter
+++  // extension.
+++  intptr_t* sender_sp = this->sender_sp();
+++
+++  // This is the sp before any possible extension (adapter/locals).
+++  intptr_t* unextended_sp = interpreter_frame_sender_sp();
+++
+++#ifdef COMPILER2
+++  assert(map != NULL, "map must be set");
+++  if (map->update_map()) {
+++    update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
+++  }
+++#endif // COMPILER2
+++
+++  return frame(sender_sp, unextended_sp, link(), sender_pc());
+++}
+++
+++
+++//------------------------------------------------------------------------------
+++// frame::sender_for_compiled_frame
+++frame frame::sender_for_compiled_frame(RegisterMap* map) const {
+++  // We cannot rely upon the last fp having been saved to the thread
+++  // in C2 code, but it will have been pushed onto the stack, so we
+++  // have to find it relative to the unextended sp.
+++
+++  assert(_cb->frame_size() >= 0, "must have non-zero frame size");
+++  intptr_t* l_sender_sp = unextended_sp() + _cb->frame_size();
+++  intptr_t* unextended_sp = l_sender_sp;
+++
+++  // The return address is always the word just below the sender sp
+++  // (return_addr_offset).
+++  address sender_pc = (address) *(l_sender_sp + frame::return_addr_offset);
+++
+++  intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp + frame::link_offset);
+++
+++  assert(map != NULL, "map must be set");
+++  if (map->update_map()) {
+++    // Tell GC to use argument oopmaps for some runtime stubs that need it.
+++    // For C1, the runtime stub might not have oop maps, so set this flag
+++    // outside of update_register_map.
+++    map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
+++    if (_cb->oop_maps() != NULL) {
+++      OopMapSet::update_register_map(this, map);
+++    }
+++
+++    // Since the prolog does the save and restore of FP there is no
+++    // oopmap for it, so we must fill in its location as if there were
+++    // an oopmap entry, since if our caller was compiled code there
+++    // could be live jvm state in it.
+++    update_map_with_saved_link(map, saved_fp_addr);
+++  }
+++
+++  return frame(l_sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
+++}
+++
+++//------------------------------------------------------------------------------
+++// frame::sender_raw
+++frame frame::sender_raw(RegisterMap* map) const {
+++  // Default is we don't have to follow them. The sender_for_xxx will
+++  // update it accordingly.
+++  assert(map != NULL, "map must be set");
+++  map->set_include_argument_oops(false);
+++
+++  if (is_entry_frame()) {
+++    return sender_for_entry_frame(map);
+++  }
+++  if (is_interpreted_frame()) {
+++    return sender_for_interpreter_frame(map);
+++  }
+++  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
+++
+++  // This test looks odd: why is it not is_compiled_frame()? That's
+++  // because stubs also have OOP maps.
+++  if (_cb != NULL) {
+++    return sender_for_compiled_frame(map);
+++  }
+++
+++  // Must be native-compiled frame, i.e. the marshaling code for native
+++  // methods that exists in the core system.
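+++  // For such frames the sender is reconstructed directly from the saved
+++  // fp and return address using the standard C frame layout (see the
+++  // diagram in frame_riscv.hpp); the three-argument frame constructor
+++  // used below leaves unextended_sp equal to sp.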
+++  return frame(sender_sp(), link(), sender_pc());
+++}
+++
+++frame frame::sender(RegisterMap* map) const {
+++  frame result = sender_raw(map);
+++
+++  if (map->process_frames()) {
+++    StackWatermarkSet::on_iteration(map->thread(), result);
+++  }
+++
+++  return result;
+++}
+++
+++bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
+++  assert(is_interpreted_frame(), "Not an interpreted frame");
+++  // These are reasonable sanity checks
+++  if (fp() == NULL || (intptr_t(fp()) & (wordSize-1)) != 0) {
+++    return false;
+++  }
+++  if (sp() == NULL || (intptr_t(sp()) & (wordSize-1)) != 0) {
+++    return false;
+++  }
+++  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
+++    return false;
+++  }
+++  // These are hacks to keep us out of trouble.
+++  // The problem with these is that they mask other problems.
+++  if (fp() <= sp()) { // this attempts to deal with unsigned comparison above
+++    return false;
+++  }
+++
+++  // Do some validation of frame elements.
+++
+++  // First the method:
+++  Method* m = *interpreter_frame_method_addr();
+++  // validate the method we'd find in this potential sender
+++  if (!Method::is_valid_method(m)) {
+++    return false;
+++  }
+++
+++  // Stack frames shouldn't be much larger than max_stack elements.
+++  // This test requires the use of unextended_sp, which is the sp as seen by
+++  // the current frame, and not sp, which is the "raw" sp and could point
+++  // further because of local variables of the callee method inserted after
+++  // the method arguments.
+++  if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
+++    return false;
+++  }
+++
+++  // validate bci/bcx
+++  address bcp = interpreter_frame_bcp();
+++  if (m->validate_bci_from_bcp(bcp) < 0) {
+++    return false;
+++  }
+++
+++  // validate constantPoolCache*
+++  ConstantPoolCache* cp = *interpreter_frame_cache_addr();
+++  if (MetaspaceObj::is_valid(cp) == false) {
+++    return false;
+++  }
+++
+++  // validate locals
+++  address locals = (address) *interpreter_frame_locals_addr();
+++  if (locals > thread->stack_base() || locals < (address) fp()) {
+++    return false;
+++  }
+++
+++  // We'd have to be pretty unlucky to be misled at this point.
+++  return true;
+++}
+++
+++BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
+++  assert(is_interpreted_frame(), "interpreted frame expected");
+++  Method* method = interpreter_frame_method();
+++  BasicType type = method->result_type();
+++
+++  intptr_t* tos_addr = NULL;
+++  if (method->is_native()) {
+++    tos_addr = (intptr_t*)sp();
+++    if (type == T_FLOAT || type == T_DOUBLE) {
+++      // This is because we do a push(ltos) after push(dtos) in generate_native_entry.
+++      tos_addr += 2 * Interpreter::stackElementWords;
+++    }
+++  } else {
+++    tos_addr = (intptr_t*)interpreter_frame_tos_address();
+++  }
+++
+++  switch (type) {
+++    case T_OBJECT :
+++    case T_ARRAY : {
+++      oop obj;
+++      if (method->is_native()) {
+++        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
+++      } else {
+++        oop* obj_p = (oop*)tos_addr;
+++        obj = (obj_p == NULL) ?
(oop)NULL : *obj_p; +++ } +++ assert(Universe::is_in_heap_or_null(obj), "sanity check"); +++ *oop_result = obj; +++ break; +++ } +++ case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break; +++ case T_BYTE : value_result->b = *(jbyte*)tos_addr; break; +++ case T_CHAR : value_result->c = *(jchar*)tos_addr; break; +++ case T_SHORT : value_result->s = *(jshort*)tos_addr; break; +++ case T_INT : value_result->i = *(jint*)tos_addr; break; +++ case T_LONG : value_result->j = *(jlong*)tos_addr; break; +++ case T_FLOAT : { +++ value_result->f = *(jfloat*)tos_addr; +++ break; +++ } +++ case T_DOUBLE : value_result->d = *(jdouble*)tos_addr; break; +++ case T_VOID : /* Nothing to do */ break; +++ default : ShouldNotReachHere(); +++ } +++ +++ return type; +++} +++ +++ +++intptr_t* frame::interpreter_frame_tos_at(jint offset) const { +++ int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize); +++ return &interpreter_frame_tos_address()[index]; +++} +++ +++#ifndef PRODUCT +++ +++#define DESCRIBE_FP_OFFSET(name) \ +++ values.describe(frame_no, fp() + frame::name##_offset, #name) +++ +++void frame::describe_pd(FrameValues& values, int frame_no) { +++ if (is_interpreted_frame()) { +++ DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp); +++ DESCRIBE_FP_OFFSET(interpreter_frame_last_sp); +++ DESCRIBE_FP_OFFSET(interpreter_frame_method); +++ DESCRIBE_FP_OFFSET(interpreter_frame_mdp); +++ DESCRIBE_FP_OFFSET(interpreter_frame_mirror); +++ DESCRIBE_FP_OFFSET(interpreter_frame_cache); +++ DESCRIBE_FP_OFFSET(interpreter_frame_locals); +++ DESCRIBE_FP_OFFSET(interpreter_frame_bcp); +++ DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp); +++ } +++} +++#endif +++ +++intptr_t *frame::initial_deoptimization_info() { +++ // Not used on riscv, but we must return something. +++ return NULL; +++} +++ +++intptr_t* frame::real_fp() const { +++ if (_cb != NULL) { +++ // use the frame size if valid +++ int size = _cb->frame_size(); +++ if (size > 0) { +++ return unextended_sp() + size; +++ } +++ } +++ // else rely on fp() +++ assert(!is_compiled_frame(), "unknown compiled frame size"); +++ return fp(); +++} +++ +++#undef DESCRIBE_FP_OFFSET +++ +++#ifndef PRODUCT +++// This is a generic constructor which is only used by pns() in debug.cpp. +++frame::frame(void* ptr_sp, void* ptr_fp, void* pc) { +++ init((intptr_t*)ptr_sp, (intptr_t*)ptr_fp, (address)pc); +++} +++ +++void frame::pd_ps() {} +++#endif +++ +++void JavaFrameAnchor::make_walkable() { +++ // last frame set? +++ if (last_Java_sp() == NULL) { return; } +++ // already walkable? +++ if (walkable()) { return; } +++ vmassert(last_Java_sp() != NULL, "not called from Java code?"); +++ vmassert(last_Java_pc() == NULL, "already walkable"); +++ _last_Java_pc = (address)_last_Java_sp[-1]; +++ vmassert(walkable(), "something went wrong"); +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/frame_riscv.hpp ++@@ -0,0 +1,204 @@ +++/* +++ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_FRAME_RISCV_HPP +++#define CPU_RISCV_FRAME_RISCV_HPP +++ +++#include "runtime/synchronizer.hpp" +++ +++// A frame represents a physical stack frame (an activation). Frames can be +++// C or Java frames, and the Java frames can be interpreted or compiled. +++// In contrast, vframes represent source-level activations, so that one physical frame +++// can correspond to multiple source level frames because of inlining. +++// A frame is comprised of {pc, fp, sp} +++// ------------------------------ Asm interpreter ---------------------------------------- +++// Layout of asm interpreter frame: +++// [expression stack ] * <- sp +++ +++// [monitors[0] ] \ +++// ... | monitor block size = k +++// [monitors[k-1] ] / +++// [frame initial esp ] ( == &monitors[0], initially here) initial_sp_offset +++// [byte code index/pointr] = bcx() bcx_offset +++ +++// [pointer to locals ] = locals() locals_offset +++// [constant pool cache ] = cache() cache_offset +++ +++// [klass of method ] = mirror() mirror_offset +++// [padding ] +++ +++// [methodData ] = mdp() mdx_offset +++// [Method ] = method() method_offset +++ +++// [last esp ] = last_sp() last_sp_offset +++// [old stack pointer ] (sender_sp) sender_sp_offset +++ +++// [old frame pointer ] +++// [return pc ] +++ +++// [last sp ] <- fp = link() +++// [oop temp ] (only for native calls) +++ +++// [padding ] (to preserve machine SP alignment) +++// [locals and parameters ] +++// <- sender sp +++// ------------------------------ Asm interpreter ---------------------------------------- +++ +++// ------------------------------ C Frame ------------------------------------------------ +++// Stack: gcc with -fno-omit-frame-pointer +++// . +++// . +++// +-> . +++// | +-----------------+ | +++// | | return address | | +++// | | previous fp ------+ +++// | | saved registers | +++// | | local variables | +++// | | ... | <-+ +++// | +-----------------+ | +++// | | return address | | +++// +------ previous fp | | +++// | saved registers | | +++// | local variables | | +++// +-> | ... | | +++// | +-----------------+ | +++// | | return address | | +++// | | previous fp ------+ +++// | | saved registers | +++// | | local variables | +++// | | ... | <-+ +++// | +-----------------+ | +++// | | return address | | +++// +------ previous fp | | +++// | saved registers | | +++// | local variables | | +++// $fp --> | ... 
| | +++// +-----------------+ | +++// | return address | | +++// | previous fp ------+ +++// | saved registers | +++// $sp --> | local variables | +++// +-----------------+ +++// ------------------------------ C Frame ------------------------------------------------ +++ +++ public: +++ enum { +++ pc_return_offset = 0, +++ +++ // All frames +++ link_offset = -2, +++ return_addr_offset = -1, +++ sender_sp_offset = 0, +++ +++ // Interpreter frames +++ interpreter_frame_oop_temp_offset = 1, // for native calls only +++ +++ interpreter_frame_sender_sp_offset = -3, +++ // outgoing sp before a call to an invoked method +++ interpreter_frame_last_sp_offset = interpreter_frame_sender_sp_offset - 1, +++ interpreter_frame_method_offset = interpreter_frame_last_sp_offset - 1, +++ interpreter_frame_mdp_offset = interpreter_frame_method_offset - 1, +++ interpreter_frame_padding_offset = interpreter_frame_mdp_offset - 1, +++ interpreter_frame_mirror_offset = interpreter_frame_padding_offset - 1, +++ interpreter_frame_cache_offset = interpreter_frame_mirror_offset - 1, +++ interpreter_frame_locals_offset = interpreter_frame_cache_offset - 1, +++ interpreter_frame_bcp_offset = interpreter_frame_locals_offset - 1, +++ interpreter_frame_initial_sp_offset = interpreter_frame_bcp_offset - 1, +++ +++ interpreter_frame_monitor_block_top_offset = interpreter_frame_initial_sp_offset, +++ interpreter_frame_monitor_block_bottom_offset = interpreter_frame_initial_sp_offset, +++ +++ // Entry frames +++ // n.b. these values are determined by the layout defined in +++ // stubGenerator for the Java call stub +++ entry_frame_after_call_words = 34, +++ entry_frame_call_wrapper_offset = -10, +++ +++ // we don't need a save area +++ arg_reg_save_area_bytes = 0 +++ }; +++ +++ intptr_t ptr_at(int offset) const { +++ return *ptr_at_addr(offset); +++ } +++ +++ void ptr_at_put(int offset, intptr_t value) { +++ *ptr_at_addr(offset) = value; +++ } +++ +++ private: +++ // an additional field beyond _sp and _pc: +++ intptr_t* _fp; // frame pointer +++ // The interpreter and adapters will extend the frame of the caller. +++ // Since oopMaps are based on the sp of the caller before extension +++ // we need to know that value. However in order to compute the address +++ // of the return address we need the real "raw" sp. Since sparc already +++ // uses sp() to mean "raw" sp and unextended_sp() to mean the caller's +++ // original sp we use that convention. 
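+++  // Illustration: if an interpreter callee extended this frame by k words,
+++  // the saved "raw" sp is k words below unextended_sp(); oopMap lookups
+++  // must be based on the unextended sp.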
+++
+++  intptr_t* _unextended_sp;
+++  void adjust_unextended_sp();
+++
+++  intptr_t* ptr_at_addr(int offset) const {
+++    return (intptr_t*) addr_at(offset);
+++  }
+++
+++#ifdef ASSERT
+++  // Used in frame::sender_for_{interpreter,compiled}_frame
+++  static void verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp);
+++#endif
+++
+++ public:
+++  // Constructors
+++
+++  frame(intptr_t* ptr_sp, intptr_t* ptr_fp, address pc);
+++
+++  frame(intptr_t* ptr_sp, intptr_t* unextended_sp, intptr_t* ptr_fp, address pc);
+++
+++  frame(intptr_t* ptr_sp, intptr_t* ptr_fp);
+++
+++  void init(intptr_t* ptr_sp, intptr_t* ptr_fp, address pc);
+++
+++  // accessors for the instance variables
+++  // Note: not necessarily the real 'frame pointer' (see real_fp)
+++  intptr_t* fp() const { return _fp; }
+++
+++  inline address* sender_pc_addr() const;
+++
+++  // expression stack tos if we are nested in a java call
+++  intptr_t* interpreter_frame_last_sp() const;
+++
+++  // helper to update a map with the callee-saved frame pointer
+++  static void update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr);
+++
+++  // deoptimization support
+++  void interpreter_frame_set_last_sp(intptr_t* last_sp);
+++
+++  static jint interpreter_frame_expression_stack_direction() { return -1; }
+++
+++  // returns the sending frame, without applying any barriers
+++  frame sender_raw(RegisterMap* map) const;
+++
+++#endif // CPU_RISCV_FRAME_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/frame_riscv.inline.hpp
++@@ -0,0 +1,246 @@
+++/*
+++ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef CPU_RISCV_FRAME_RISCV_INLINE_HPP
+++#define CPU_RISCV_FRAME_RISCV_INLINE_HPP
+++
+++#include "code/codeCache.hpp"
+++#include "code/vmreg.inline.hpp"
+++
+++// Inline functions for RISCV frames:
+++
+++// Constructors:
+++
+++inline frame::frame() {
+++  _pc = NULL;
+++  _sp = NULL;
+++  _unextended_sp = NULL;
+++  _fp = NULL;
+++  _cb = NULL;
+++  _deopt_state = unknown;
+++}
+++
+++static int spin;
+++
+++inline void frame::init(intptr_t* ptr_sp, intptr_t* ptr_fp, address pc) {
+++  intptr_t a = intptr_t(ptr_sp);
+++  intptr_t b = intptr_t(ptr_fp);
+++  _sp = ptr_sp;
+++  _unextended_sp = ptr_sp;
+++  _fp = ptr_fp;
+++  _pc = pc;
+++  assert(pc != NULL, "no pc?");
+++  _cb = CodeCache::find_blob(pc);
+++  adjust_unextended_sp();
+++
+++  address original_pc = CompiledMethod::get_deopt_original_pc(this);
+++  if (original_pc != NULL) {
+++    _pc = original_pc;
+++    _deopt_state = is_deoptimized;
+++  } else {
+++    _deopt_state = not_deoptimized;
+++  }
+++}
+++
+++inline frame::frame(intptr_t* ptr_sp, intptr_t* ptr_fp, address pc) {
+++  init(ptr_sp, ptr_fp, pc);
+++}
+++
+++inline frame::frame(intptr_t* ptr_sp, intptr_t* unextended_sp, intptr_t* ptr_fp, address pc) {
+++  intptr_t a = intptr_t(ptr_sp);
+++  intptr_t b = intptr_t(ptr_fp);
+++  _sp = ptr_sp;
+++  _unextended_sp = unextended_sp;
+++  _fp = ptr_fp;
+++  _pc = pc;
+++  assert(pc != NULL, "no pc?");
+++  _cb = CodeCache::find_blob(pc);
+++  adjust_unextended_sp();
+++
+++  address original_pc = CompiledMethod::get_deopt_original_pc(this);
+++  if (original_pc != NULL) {
+++    _pc = original_pc;
+++    assert(_cb->as_compiled_method()->insts_contains_inclusive(_pc),
+++           "original PC must be in the main code section of the compiled method (or must be immediately following it)");
+++    _deopt_state = is_deoptimized;
+++  } else {
+++    _deopt_state = not_deoptimized;
+++  }
+++}
+++
+++inline frame::frame(intptr_t* ptr_sp, intptr_t* ptr_fp) {
+++  intptr_t a = intptr_t(ptr_sp);
+++  intptr_t b = intptr_t(ptr_fp);
+++  _sp = ptr_sp;
+++  _unextended_sp = ptr_sp;
+++  _fp = ptr_fp;
+++  _pc = (address)(ptr_sp[-1]);
+++
+++  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
+++  // when last_Java_sp is non-null but the pc fetched is junk. If we are truly
+++  // unlucky the junk value could be to a zombied method and we'll die on the
+++  // find_blob call. This is also why we can have no asserts on the validity
+++  // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
+++  // -> pd_last_frame should use a specialized version of pd_last_frame which could
+++  // call a specialized frame constructor instead of this one.
+++  // Then we could use the assert below. However this assert is of somewhat dubious
+++  // value.
+++
+++  _cb = CodeCache::find_blob(_pc);
+++  adjust_unextended_sp();
+++
+++  address original_pc = CompiledMethod::get_deopt_original_pc(this);
+++  if (original_pc != NULL) {
+++    _pc = original_pc;
+++    _deopt_state = is_deoptimized;
+++  } else {
+++    _deopt_state = not_deoptimized;
+++  }
+++}
+++
+++// Accessors
+++
+++inline bool frame::equal(frame other) const {
+++  bool ret = sp() == other.sp() &&
+++             unextended_sp() == other.unextended_sp() &&
+++             fp() == other.fp() &&
+++             pc() == other.pc();
+++  assert(!ret || (cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
+++  return ret;
+++}
+++
+++// Return unique id for this frame. The id must have a value where we can distinguish
+++// identity and younger/older relationship.
NULL represents an invalid (incomparable) +++// frame. +++inline intptr_t* frame::id(void) const { return unextended_sp(); } +++ +++// Return true if the frame is older (less recent activation) than the frame represented by id +++inline bool frame::is_older(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id"); +++ return this->id() > id ; } +++ +++inline intptr_t* frame::link() const { return (intptr_t*) *(intptr_t **)addr_at(link_offset); } +++ +++inline intptr_t* frame::link_or_null() const { +++ intptr_t** ptr = (intptr_t **)addr_at(link_offset); +++ return os::is_readable_pointer(ptr) ? *ptr : NULL; +++} +++ +++inline intptr_t* frame::unextended_sp() const { return _unextended_sp; } +++ +++// Return address +++inline address* frame::sender_pc_addr() const { return (address*) addr_at(return_addr_offset); } +++inline address frame::sender_pc() const { return *sender_pc_addr(); } +++inline intptr_t* frame::sender_sp() const { return addr_at(sender_sp_offset); } +++ +++inline intptr_t** frame::interpreter_frame_locals_addr() const { +++ return (intptr_t**)addr_at(interpreter_frame_locals_offset); +++} +++ +++inline intptr_t* frame::interpreter_frame_last_sp() const { +++ return *(intptr_t**)addr_at(interpreter_frame_last_sp_offset); +++} +++ +++inline intptr_t* frame::interpreter_frame_bcp_addr() const { +++ return (intptr_t*)addr_at(interpreter_frame_bcp_offset); +++} +++ +++inline intptr_t* frame::interpreter_frame_mdp_addr() const { +++ return (intptr_t*)addr_at(interpreter_frame_mdp_offset); +++} +++ +++ +++// Constant pool cache +++ +++inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const { +++ return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset); +++} +++ +++// Method +++ +++inline Method** frame::interpreter_frame_method_addr() const { +++ return (Method**)addr_at(interpreter_frame_method_offset); +++} +++ +++// Mirror +++ +++inline oop* frame::interpreter_frame_mirror_addr() const { +++ return (oop*)addr_at(interpreter_frame_mirror_offset); +++} +++ +++// top of expression stack +++inline intptr_t* frame::interpreter_frame_tos_address() const { +++ intptr_t* last_sp = interpreter_frame_last_sp(); +++ if (last_sp == NULL) { +++ return sp(); +++ } else { +++ // sp() may have been extended or shrunk by an adapter. At least +++ // check that we don't fall behind the legal region. +++ // For top deoptimized frame last_sp == interpreter_frame_monitor_end. 
+++ assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos"); +++ return last_sp; +++ } +++} +++ +++inline oop* frame::interpreter_frame_temp_oop_addr() const { +++ return (oop *)(fp() + interpreter_frame_oop_temp_offset); +++} +++ +++inline int frame::interpreter_frame_monitor_size() { +++ return BasicObjectLock::size(); +++} +++ +++ +++// expression stack +++// (the max_stack arguments are used by the GC; see class FrameClosure) +++ +++inline intptr_t* frame::interpreter_frame_expression_stack() const { +++ intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end(); +++ return monitor_end-1; +++} +++ +++ +++// Entry frames +++ +++inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const { +++ return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset); +++} +++ +++ +++// Compiled frames +++ +++inline oop frame::saved_oop_result(RegisterMap* map) const { +++ oop* result_adr = (oop *)map->location(x10->as_VMReg()); +++ guarantee(result_adr != NULL, "bad register save location"); +++ return (*result_adr); +++} +++ +++inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) { +++ oop* result_adr = (oop *)map->location(x10->as_VMReg()); +++ guarantee(result_adr != NULL, "bad register save location"); +++ *result_adr = obj; +++} +++ +++#endif // CPU_RISCV_FRAME_RISCV_INLINE_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp ++@@ -0,0 +1,478 @@ +++/* +++ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.inline.hpp" +++#include "gc/g1/g1BarrierSet.hpp" +++#include "gc/g1/g1BarrierSetAssembler.hpp" +++#include "gc/g1/g1BarrierSetRuntime.hpp" +++#include "gc/g1/g1CardTable.hpp" +++#include "gc/g1/g1ThreadLocalData.hpp" +++#include "gc/g1/heapRegion.hpp" +++#include "gc/shared/collectedHeap.hpp" +++#include "interpreter/interp_masm.hpp" +++#include "runtime/sharedRuntime.hpp" +++#include "runtime/thread.hpp" +++#ifdef COMPILER1 +++#include "c1/c1_LIRAssembler.hpp" +++#include "c1/c1_MacroAssembler.hpp" +++#include "gc/g1/c1/g1BarrierSetC1.hpp" +++#endif +++ +++#define __ masm-> +++ +++void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, +++ Register addr, Register count, RegSet saved_regs) { +++ bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; +++ if (!dest_uninitialized) { +++ Label done; +++ Address in_progress(xthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset())); +++ +++ // Is marking active? +++ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { +++ __ lwu(t0, in_progress); +++ } else { +++ assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); +++ __ lbu(t0, in_progress); +++ } +++ __ beqz(t0, done); +++ +++ __ push_reg(saved_regs, sp); +++ if (count == c_rarg0) { +++ if (addr == c_rarg1) { +++ // exactly backwards!! +++ __ mv(t0, c_rarg0); +++ __ mv(c_rarg0, c_rarg1); +++ __ mv(c_rarg1, t0); +++ } else { +++ __ mv(c_rarg1, count); +++ __ mv(c_rarg0, addr); +++ } +++ } else { +++ __ mv(c_rarg0, addr); +++ __ mv(c_rarg1, count); +++ } +++ if (UseCompressedOops) { +++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), 2); +++ } else { +++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), 2); +++ } +++ __ pop_reg(saved_regs, sp); +++ +++ __ bind(done); +++ } +++} +++ +++void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, +++ Register start, Register count, Register tmp, RegSet saved_regs) { +++ __ push_reg(saved_regs, sp); +++ assert_different_registers(start, count, tmp); +++ assert_different_registers(c_rarg0, count); +++ __ mv(c_rarg0, start); +++ __ mv(c_rarg1, count); +++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), 2); +++ __ pop_reg(saved_regs, sp); +++} +++ +++void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, +++ Register obj, +++ Register pre_val, +++ Register thread, +++ Register tmp, +++ bool tosca_live, +++ bool expand_call) { +++ // If expand_call is true then we expand the call_VM_leaf macro +++ // directly to skip generating the check by +++ // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp. +++ +++ assert(thread == xthread, "must be"); +++ +++ Label done; +++ Label runtime; +++ +++ assert_different_registers(obj, pre_val, tmp, t0); +++ assert(pre_val != noreg && tmp != noreg, "expecting a register"); +++ +++ Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset())); +++ Address index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset())); +++ Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset())); +++ +++ // Is marking active? 
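+++  // (The flag loaded here is the thread-local SATB mark-queue active
+++  // word; its byte width is a build-dependent property, hence the two
+++  // load sizes below. When it reads zero, concurrent marking is off and
+++  // the pre-barrier is a no-op.)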
+++  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { // 4-byte width
+++    __ lwu(tmp, in_progress);
+++  } else {
+++    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+++    __ lbu(tmp, in_progress);
+++  }
+++  __ beqz(tmp, done);
+++
+++  // Do we need to load the previous value?
+++  if (obj != noreg) {
+++    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
+++  }
+++
+++  // Is the previous value null?
+++  __ beqz(pre_val, done);
+++
+++  // Can we store original value in the thread's buffer?
+++  // Is index == 0?
+++  // (The index field is typed as size_t.)
+++
+++  __ ld(tmp, index);           // tmp := *index_adr
+++  __ beqz(tmp, runtime);       // tmp == 0?
+++                               // If yes, goto runtime
+++
+++  __ sub(tmp, tmp, wordSize);  // tmp := tmp - wordSize
+++  __ sd(tmp, index);           // *index_adr := tmp
+++  __ ld(t0, buffer);
+++  __ add(tmp, tmp, t0);        // tmp := tmp + *buffer_adr
+++
+++  // Record the previous value
+++  __ sd(pre_val, Address(tmp, 0));
+++  __ j(done);
+++
+++  __ bind(runtime);
+++  // save the live input values
+++  RegSet saved = RegSet::of(pre_val);
+++  if (tosca_live) { saved += RegSet::of(x10); }
+++  if (obj != noreg) { saved += RegSet::of(obj); }
+++
+++  __ push_reg(saved, sp);
+++
+++  if (expand_call) {
+++    assert(pre_val != c_rarg1, "smashed arg");
+++    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
+++  } else {
+++    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
+++  }
+++
+++  __ pop_reg(saved, sp);
+++
+++  __ bind(done);
+++
+++}
+++
+++void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
+++                                                  Register store_addr,
+++                                                  Register new_val,
+++                                                  Register thread,
+++                                                  Register tmp,
+++                                                  Register tmp2) {
+++  assert(thread == xthread, "must be");
+++  assert_different_registers(store_addr, new_val, thread, tmp, tmp2,
+++                             t0);
+++  assert(store_addr != noreg && new_val != noreg && tmp != noreg &&
+++         tmp2 != noreg, "expecting a register");
+++
+++  Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
+++  Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
+++
+++  BarrierSet* bs = BarrierSet::barrier_set();
+++  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet*>(bs);
+++  CardTable* ct = ctbs->card_table();
+++
+++  Label done;
+++  Label runtime;
+++
+++  // Does store cross heap regions?
+++
+++  __ xorr(tmp, store_addr, new_val);
+++  __ srli(tmp, tmp, HeapRegion::LogOfHRGrainBytes);
+++  __ beqz(tmp, done);
+++
+++  // crosses regions, storing NULL?
+++
+++  __ beqz(new_val, done);
+++
+++  // storing region crossing non-NULL, is card already dirty?
+++
+++  ExternalAddress cardtable((address) ct->byte_map_base());
+++  const Register card_addr = tmp;
+++
+++  __ srli(card_addr, store_addr, CardTable::card_shift);
+++
+++  // get the address of the card
+++  __ load_byte_map_base(tmp2);
+++  __ add(card_addr, card_addr, tmp2);
+++  __ lbu(tmp2, Address(card_addr));
+++  __ mv(t0, (int)G1CardTable::g1_young_card_val());
+++  __ beq(tmp2, t0, done);
+++
+++  assert((int)CardTable::dirty_card_val() == 0, "must be 0");
+++
+++  __ membar(MacroAssembler::StoreLoad);
+++
+++  __ lbu(tmp2, Address(card_addr));
+++  __ beqz(tmp2, done);
+++
+++  // storing a region crossing, non-NULL oop, card is clean.
+++  // dirty card and log.
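+++  // In effect, the fast path emitted below is (illustrative sketch):
+++  //   *card_addr = dirty_card_val();         // sb zr, (card_addr)
+++  //   if (queue_index == 0) goto runtime;    // thread's buffer is full
+++  //   queue_index -= wordSize;
+++  //   buffer[queue_index] = card_addr;       // log the dirtied card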
+++ +++ __ sb(zr, Address(card_addr)); +++ +++ __ ld(t0, queue_index); +++ __ beqz(t0, runtime); +++ __ sub(t0, t0, wordSize); +++ __ sd(t0, queue_index); +++ +++ __ ld(tmp2, buffer); +++ __ add(t0, tmp2, t0); +++ __ sd(card_addr, Address(t0, 0)); +++ __ j(done); +++ +++ __ bind(runtime); +++ // save the live input values +++ RegSet saved = RegSet::of(store_addr); +++ __ push_reg(saved, sp); +++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread); +++ __ pop_reg(saved, sp); +++ +++ __ bind(done); +++} +++ +++void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, +++ Register dst, Address src, Register tmp1, Register tmp_thread) { +++ bool on_oop = is_reference_type(type); +++ bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0; +++ bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0; +++ bool on_reference = on_weak || on_phantom; +++ ModRefBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); +++ if (on_oop && on_reference) { +++ // RA is live. It must be saved around calls. +++ __ enter(); // barrier may call runtime +++ // Generate the G1 pre-barrier code to log the value of +++ // the referent field in an SATB buffer. +++ g1_write_barrier_pre(masm /* masm */, +++ noreg /* obj */, +++ dst /* pre_val */, +++ xthread /* thread */, +++ tmp1 /* tmp */, +++ true /* tosca_live */, +++ true /* expand_call */); +++ __ leave(); +++ } +++} +++ +++void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, +++ Address dst, Register val, Register tmp1, Register tmp2) { +++ // flatten object address if needed +++ if (dst.offset() == 0) { +++ if (dst.base() != x13) { +++ __ mv(x13, dst.base()); +++ } +++ } else { +++ __ la(x13, dst); +++ } +++ +++ g1_write_barrier_pre(masm, +++ x13 /* obj */, +++ tmp2 /* pre_val */, +++ xthread /* thread */, +++ tmp1 /* tmp */, +++ val != noreg /* tosca_live */, +++ false /* expand_call */); +++ +++ if (val == noreg) { +++ BarrierSetAssembler::store_at(masm, decorators, type, Address(x13, 0), noreg, noreg, noreg); +++ } else { +++ // G1 barrier needs uncompressed oop for region cross check. +++ Register new_val = val; +++ if (UseCompressedOops) { +++ new_val = t1; +++ __ mv(new_val, val); +++ } +++ BarrierSetAssembler::store_at(masm, decorators, type, Address(x13, 0), val, noreg, noreg); +++ g1_write_barrier_post(masm, +++ x13 /* store_adr */, +++ new_val /* new_val */, +++ xthread /* thread */, +++ tmp1 /* tmp */, +++ tmp2 /* tmp2 */); +++ } +++} +++ +++#ifdef COMPILER1 +++ +++#undef __ +++#define __ ce->masm()-> +++ +++void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) { +++ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1(); +++ +++ // At this point we know that marking is in progress. +++ // If do_load() is true then we have to emit the +++ // load of the previous value; otherwise it has already +++ // been loaded into _pre_val. 
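+++  // Sketch of the stub emitted below (illustrative): test pre_val for
+++  // null, spill it as stub parameter 0, far-call the shared C1 runtime
+++  // blob that enqueues it on the thread's SATB queue, then jump back to
+++  // the stub's continuation.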
+++  __ bind(*stub->entry());
+++
+++  assert(stub->pre_val()->is_register(), "Precondition.");
+++
+++  Register pre_val_reg = stub->pre_val()->as_register();
+++
+++  if (stub->do_load()) {
+++    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /* wide */, false /* unaligned */);
+++  }
+++  __ beqz(pre_val_reg, *stub->continuation(), /* is_far */ true);
+++  ce->store_parameter(stub->pre_val()->as_register(), 0);
+++  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
+++  __ j(*stub->continuation());
+++}
+++
+++void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
+++  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+++  __ bind(*stub->entry());
+++  assert(stub->addr()->is_register(), "Precondition");
+++  assert(stub->new_val()->is_register(), "Precondition");
+++  Register new_val_reg = stub->new_val()->as_register();
+++  __ beqz(new_val_reg, *stub->continuation(), /* is_far */ true);
+++  ce->store_parameter(stub->addr()->as_pointer_register(), 0);
+++  __ far_call(RuntimeAddress(bs->post_barrier_c1_runtime_code_blob()->code_begin()));
+++  __ j(*stub->continuation());
+++}
+++
+++#undef __
+++
+++#define __ sasm->
+++
+++void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
+++  __ prologue("g1_pre_barrier", false);
+++
+++  BarrierSet* bs = BarrierSet::barrier_set();
+++
+++  // arg0 : previous value of memory
+++  const Register pre_val = x10;
+++  const Register thread = xthread;
+++  const Register tmp = t0;
+++
+++  Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
+++  Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
+++  Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
+++
+++  Label done;
+++  Label runtime;
+++
+++  // Is marking still active?
+++  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { // 4-byte width
+++    __ lwu(tmp, in_progress);
+++  } else {
+++    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+++    __ lbu(tmp, in_progress);
+++  }
+++  __ beqz(tmp, done);
+++
+++  // Can we store original value in the thread's buffer?
+++  __ ld(tmp, queue_index);
+++  __ beqz(tmp, runtime);
+++
+++  __ sub(tmp, tmp, wordSize);
+++  __ sd(tmp, queue_index);
+++  __ ld(t1, buffer);
+++  __ add(tmp, tmp, t1);
+++  __ load_parameter(0, t1);
+++  __ sd(t1, Address(tmp, 0));
+++  __ j(done);
+++
+++  __ bind(runtime);
+++  __ push_call_clobbered_registers();
+++  __ load_parameter(0, pre_val);
+++  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
+++  __ pop_call_clobbered_registers();
+++  __ bind(done);
+++
+++  __ epilogue();
+++}
+++
+++void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
+++  __ prologue("g1_post_barrier", false);
+++
+++  // arg0 : store_address
+++  Address store_addr(fp, 2 * BytesPerWord); // 2 BytesPerWord from fp
+++
+++  BarrierSet* bs = BarrierSet::barrier_set();
+++  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet*>(bs);
+++  CardTable* ct = ctbs->card_table();
+++
+++  Label done;
+++  Label runtime;
+++
+++  // At this point we know new_value is non-NULL and that it crosses regions.
+++ // Must check to see if card is already dirty +++ const Register thread = xthread; +++ +++ Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset())); +++ Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())); +++ +++ const Register card_offset = t1; +++ // RA is free here, so we can use it to hold the byte_map_base. +++ const Register byte_map_base = ra; +++ +++ assert_different_registers(card_offset, byte_map_base, t0); +++ +++ __ load_parameter(0, card_offset); +++ __ srli(card_offset, card_offset, CardTable::card_shift); +++ __ load_byte_map_base(byte_map_base); +++ +++ // Convert card offset into an address in card_addr +++ Register card_addr = card_offset; +++ __ add(card_addr, byte_map_base, card_addr); +++ +++ __ lbu(t0, Address(card_addr, 0)); +++ __ sub(t0, t0, (int)G1CardTable::g1_young_card_val()); +++ __ beqz(t0, done); +++ +++ assert((int)CardTable::dirty_card_val() == 0, "must be 0"); +++ +++ __ membar(MacroAssembler::StoreLoad); +++ __ lbu(t0, Address(card_addr, 0)); +++ __ beqz(t0, done); +++ +++ // storing region crossing non-NULL, card is clean. +++ // dirty card and log. +++ __ sb(zr, Address(card_addr, 0)); +++ +++ __ ld(t0, queue_index); +++ __ beqz(t0, runtime); +++ __ sub(t0, t0, wordSize); +++ __ sd(t0, queue_index); +++ +++ // Reuse RA to hold buffer_addr +++ const Register buffer_addr = ra; +++ +++ __ ld(buffer_addr, buffer); +++ __ add(t0, buffer_addr, t0); +++ __ sd(card_addr, Address(t0, 0)); +++ __ j(done); +++ +++ __ bind(runtime); +++ __ push_call_clobbered_registers(); +++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread); +++ __ pop_call_clobbered_registers(); +++ __ bind(done); +++ __ epilogue(); +++} +++ +++#undef __ +++ +++#endif // COMPILER1 ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.hpp ++@@ -0,0 +1,78 @@ +++/* +++ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#ifndef CPU_RISCV_GC_G1_G1BARRIERSETASSEMBLER_RISCV_HPP +++#define CPU_RISCV_GC_G1_G1BARRIERSETASSEMBLER_RISCV_HPP +++ +++#include "asm/macroAssembler.hpp" +++#include "gc/shared/modRefBarrierSetAssembler.hpp" +++#include "utilities/macros.hpp" +++ +++#ifdef COMPILER1 +++class LIR_Assembler; +++#endif +++class StubAssembler; +++class G1PreBarrierStub; +++class G1PostBarrierStub; +++ +++class G1BarrierSetAssembler: public ModRefBarrierSetAssembler { +++protected: +++ void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, +++ Register addr, Register count, RegSet saved_regs); +++ void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, +++ Register start, Register count, Register tmp, RegSet saved_regs); +++ +++ void g1_write_barrier_pre(MacroAssembler* masm, +++ Register obj, +++ Register pre_val, +++ Register thread, +++ Register tmp, +++ bool tosca_live, +++ bool expand_call); +++ +++ void g1_write_barrier_post(MacroAssembler* masm, +++ Register store_addr, +++ Register new_val, +++ Register thread, +++ Register tmp, +++ Register tmp2); +++ +++ virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, +++ Address dst, Register val, Register tmp1, Register tmp2); +++ +++public: +++#ifdef COMPILER1 +++ void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub); +++ void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub); +++ +++ void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm); +++ void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm); +++#endif +++ +++ void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, +++ Register dst, Address src, Register tmp1, Register tmp_thread); +++}; +++ +++#endif // CPU_RISCV_GC_G1_G1BARRIERSETASSEMBLER_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/g1/g1Globals_riscv.hpp ++@@ -0,0 +1,31 @@ +++/* +++ * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_GC_G1_G1GLOBALS_RISCV_HPP +++#define CPU_RISCV_GC_G1_G1GLOBALS_RISCV_HPP +++ +++const size_t G1MergeHeapRootsPrefetchCacheSize = 16; +++ +++#endif // CPU_RISCV_GC_G1_G1GLOBALS_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp ++@@ -0,0 +1,298 @@ +++/* +++ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. 
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "classfile/classLoaderData.hpp" +++#include "gc/shared/barrierSet.hpp" +++#include "gc/shared/barrierSetAssembler.hpp" +++#include "gc/shared/barrierSetNMethod.hpp" +++#include "gc/shared/collectedHeap.hpp" +++#include "interpreter/interp_masm.hpp" +++#include "memory/universe.hpp" +++#include "runtime/jniHandles.hpp" +++#include "runtime/sharedRuntime.hpp" +++#include "runtime/stubRoutines.hpp" +++#include "runtime/thread.hpp" +++ +++#define __ masm-> +++ +++void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, +++ Register dst, Address src, Register tmp1, Register tmp_thread) { +++ // RA is live. It must be saved around calls. +++ +++ bool in_heap = (decorators & IN_HEAP) != 0; +++ bool in_native = (decorators & IN_NATIVE) != 0; +++ bool is_not_null = (decorators & IS_NOT_NULL) != 0; +++ switch (type) { +++ case T_OBJECT: // fall through +++ case T_ARRAY: { +++ if (in_heap) { +++ if (UseCompressedOops) { +++ __ lwu(dst, src); +++ if (is_not_null) { +++ __ decode_heap_oop_not_null(dst); +++ } else { +++ __ decode_heap_oop(dst); +++ } +++ } else { +++ __ ld(dst, src); +++ } +++ } else { +++ assert(in_native, "why else?"); +++ __ ld(dst, src); +++ } +++ break; +++ } +++ case T_BOOLEAN: __ load_unsigned_byte (dst, src); break; +++ case T_BYTE: __ load_signed_byte (dst, src); break; +++ case T_CHAR: __ load_unsigned_short(dst, src); break; +++ case T_SHORT: __ load_signed_short (dst, src); break; +++ case T_INT: __ lw (dst, src); break; +++ case T_LONG: __ ld (dst, src); break; +++ case T_ADDRESS: __ ld (dst, src); break; +++ case T_FLOAT: __ flw (f10, src); break; +++ case T_DOUBLE: __ fld (f10, src); break; +++ default: Unimplemented(); +++ } +++} +++ +++void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, +++ Address dst, Register val, Register tmp1, Register tmp2) { +++ bool in_heap = (decorators & IN_HEAP) != 0; +++ bool in_native = (decorators & IN_NATIVE) != 0; +++ switch (type) { +++ case T_OBJECT: // fall through +++ case T_ARRAY: { +++ val = val == noreg ? 
zr : val;
+++      if (in_heap) {
+++        if (UseCompressedOops) {
+++          assert(!dst.uses(val), "not enough registers");
+++          if (val != zr) {
+++            __ encode_heap_oop(val);
+++          }
+++          __ sw(val, dst);
+++        } else {
+++          __ sd(val, dst);
+++        }
+++      } else {
+++        assert(in_native, "why else?");
+++        __ sd(val, dst);
+++      }
+++      break;
+++    }
+++    case T_BOOLEAN:
+++      __ andi(val, val, 0x1);  // boolean is true if LSB is 1
+++      __ sb(val, dst);
+++      break;
+++    case T_BYTE:    __ sb(val, dst); break;
+++    case T_CHAR:    __ sh(val, dst); break;
+++    case T_SHORT:   __ sh(val, dst); break;
+++    case T_INT:     __ sw(val, dst); break;
+++    case T_LONG:    __ sd(val, dst); break;
+++    case T_ADDRESS: __ sd(val, dst); break;
+++    case T_FLOAT:   __ fsw(f10, dst); break;
+++    case T_DOUBLE:  __ fsd(f10, dst); break;
+++    default: Unimplemented();
+++  }
+++
+++}
+++
+++void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
+++                                                        Register obj, Register tmp, Label& slowpath) {
+++  // If mask changes we need to ensure that the inverse is still encodable as an immediate
+++  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
+++  __ andi(obj, obj, ~JNIHandles::weak_tag_mask);
+++  __ ld(obj, Address(obj, 0));  // *obj
+++}
+++
+++// Defines obj, preserves var_size_in_bytes, okay for tmp2 == var_size_in_bytes.
+++void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
+++                                        Register var_size_in_bytes,
+++                                        int con_size_in_bytes,
+++                                        Register tmp1,
+++                                        Register tmp2,
+++                                        Label& slow_case,
+++                                        bool is_far) {
+++  assert_different_registers(obj, tmp2);
+++  assert_different_registers(obj, var_size_in_bytes);
+++  Register end = tmp2;
+++
+++  __ ld(obj, Address(xthread, JavaThread::tlab_top_offset()));
+++  if (var_size_in_bytes == noreg) {
+++    __ la(end, Address(obj, con_size_in_bytes));
+++  } else {
+++    __ add(end, obj, var_size_in_bytes);
+++  }
+++  __ ld(t0, Address(xthread, JavaThread::tlab_end_offset()));
+++  __ bgtu(end, t0, slow_case, is_far);
+++
+++  // update the tlab top pointer
+++  __ sd(end, Address(xthread, JavaThread::tlab_top_offset()));
+++
+++  // recover var_size_in_bytes if necessary
+++  if (var_size_in_bytes == end) {
+++    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
+++  }
+++}
+++
+++// Defines obj, preserves var_size_in_bytes
+++void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj,
+++                                        Register var_size_in_bytes,
+++                                        int con_size_in_bytes,
+++                                        Register tmp1,
+++                                        Label& slow_case,
+++                                        bool is_far) {
+++  assert_cond(masm != NULL);
+++  assert_different_registers(obj, var_size_in_bytes, tmp1);
+++  if (!Universe::heap()->supports_inline_contig_alloc()) {
+++    __ j(slow_case);
+++  } else {
+++    Register end = tmp1;
+++    Label retry;
+++    __ bind(retry);
+++
+++    // Get the current end of the heap
+++    ExternalAddress address_end((address) Universe::heap()->end_addr());
+++    {
+++      int32_t offset;
+++      __ la_patchable(t1, address_end, offset);
+++      __ ld(t1, Address(t1, offset));
+++    }
+++
+++    // Get the current top of the heap
+++    ExternalAddress address_top((address) Universe::heap()->top_addr());
+++    {
+++      int32_t offset;
+++      __ la_patchable(t0, address_top, offset);
+++      __ addi(t0, t0, offset);
+++      __ lr_d(obj, t0, Assembler::aqrl);
+++    }
+++
+++    // Adjust it by the size of our new object
+++    if (var_size_in_bytes == noreg) {
+++      __ la(end, Address(obj, con_size_in_bytes));
+++    } else {
+++      __ add(end, obj, var_size_in_bytes);
+++    }
+++
+++    // if end < obj then we wrapped around high memory
+++    __ bltu(end, obj, slow_case, is_far);
+++
+++    __ bgtu(end, t1, slow_case, is_far);
+++
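+++    // (The lr_d above and the sc_d below form an LR/SC pair: sc_d writes
+++    // `end` to the heap-top word only if no other hart has written it
+++    // since the reservation was taken, and sets t1 non-zero on failure,
+++    // in which case the whole sequence is retried.)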
+++ // If heap_top hasn't been changed by some other thread, update it. +++ __ sc_d(t1, end, t0, Assembler::rl); +++ __ bnez(t1, retry); +++ +++ incr_allocated_bytes(masm, var_size_in_bytes, con_size_in_bytes, tmp1); +++ } +++} +++ +++void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, +++ Register var_size_in_bytes, +++ int con_size_in_bytes, +++ Register tmp1) { +++ assert(tmp1->is_valid(), "need temp reg"); +++ +++ __ ld(tmp1, Address(xthread, in_bytes(JavaThread::allocated_bytes_offset()))); +++ if (var_size_in_bytes->is_valid()) { +++ __ add(tmp1, tmp1, var_size_in_bytes); +++ } else { +++ __ add(tmp1, tmp1, con_size_in_bytes); +++ } +++ __ sd(tmp1, Address(xthread, in_bytes(JavaThread::allocated_bytes_offset()))); +++} +++ +++void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) { +++ BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); +++ +++ if (bs_nm == NULL) { +++ return; +++ } +++ +++ Assembler::IncompressibleRegion ir(masm); // Fixed length: see entry_barrier_offset() +++ +++ // RISCV atomic operations require that the memory address be naturally aligned. +++ __ align(4); +++ +++ Label skip, guard; +++ Address thread_disarmed_addr(xthread, in_bytes(bs_nm->thread_disarmed_offset())); +++ +++ __ lwu(t0, guard); +++ +++ // Subsequent loads of oops must occur after load of guard value. +++ // BarrierSetNMethod::disarm sets guard with release semantics. +++ __ membar(MacroAssembler::LoadLoad); +++ __ lwu(t1, thread_disarmed_addr); +++ __ beq(t0, t1, skip); +++ +++ int32_t offset = 0; +++ __ movptr(t0, StubRoutines::riscv::method_entry_barrier(), offset); +++ __ jalr(ra, t0, offset); +++ __ j(skip); +++ +++ __ bind(guard); +++ +++ MacroAssembler::assert_alignment(__ pc()); +++ __ emit_int32(0); // nmethod guard value. Skipped over in common case. +++ +++ __ bind(skip); +++} +++ +++void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) { +++ BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod(); +++ if (bs == NULL) { +++ return; +++ } +++ +++ Label bad_call; +++ __ beqz(xmethod, bad_call); +++ +++ // Pointer chase to the method holder to find out if the method is concurrently unloading. +++ Label method_live; +++ __ load_method_holder_cld(t0, xmethod); +++ +++ // Is it a strong CLD? +++ __ lwu(t1, Address(t0, ClassLoaderData::keep_alive_offset())); +++ __ bnez(t1, method_live); +++ +++ // Is it a weak but alive CLD? +++ __ push_reg(RegSet::of(x28, x29), sp); +++ +++ __ ld(x28, Address(t0, ClassLoaderData::holder_offset())); +++ +++ // Uses x28 & x29, so we must pass new temporaries. +++ __ resolve_weak_handle(x28, x29); +++ __ mv(t0, x28); +++ +++ __ pop_reg(RegSet::of(x28, x29), sp); +++ +++ __ bnez(t0, method_live); +++ +++ __ bind(bad_call); +++ +++ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); +++ __ bind(method_live); +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.hpp ++@@ -0,0 +1,79 @@ +++/* +++ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. 
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef CPU_RISCV_GC_SHARED_BARRIERSETASSEMBLER_RISCV_HPP
+++#define CPU_RISCV_GC_SHARED_BARRIERSETASSEMBLER_RISCV_HPP
+++
+++#include "asm/macroAssembler.hpp"
+++#include "gc/shared/barrierSet.hpp"
+++#include "gc/shared/barrierSetNMethod.hpp"
+++#include "memory/allocation.hpp"
+++#include "oops/access.hpp"
+++
+++class BarrierSetAssembler: public CHeapObj<mtGC> {
+++private:
+++  void incr_allocated_bytes(MacroAssembler* masm,
+++                            Register var_size_in_bytes, int con_size_in_bytes,
+++                            Register t1 = noreg);
+++
+++public:
+++  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+++                                  Register src, Register dst, Register count, RegSet saved_regs) {}
+++  virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+++                                  Register start, Register end, Register tmp, RegSet saved_regs) {}
+++  virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+++                       Register dst, Address src, Register tmp1, Register tmp_thread);
+++  virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+++                        Address dst, Register val, Register tmp1, Register tmp2);
+++
+++  virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
+++                                             Register obj, Register tmp, Label& slowpath);
+++
+++  virtual void tlab_allocate(MacroAssembler* masm,
+++    Register obj,                // result: pointer to object after successful allocation
+++    Register var_size_in_bytes,  // object size in bytes if unknown at compile time; invalid otherwise
+++    int con_size_in_bytes,       // object size in bytes if known at compile time
+++    Register tmp1,               // temp register
+++    Register tmp2,               // temp register
+++    Label& slow_case,            // continuation point if fast allocation fails
+++    bool is_far = false
+++  );
+++
+++  void eden_allocate(MacroAssembler* masm,
+++    Register obj,                // result: pointer to object after successful allocation
+++    Register var_size_in_bytes,  // object size in bytes if unknown at compile time; invalid otherwise
+++    int con_size_in_bytes,       // object size in bytes if known at compile time
+++    Register tmp1,               // temp register
+++    Label& slow_case,            // continuation point if fast allocation fails
+++    bool is_far = false
+++  );
+++  virtual void barrier_stubs_init() {}
+++
+++  virtual void nmethod_entry_barrier(MacroAssembler* masm);
+++  virtual void c2i_entry_barrier(MacroAssembler* masm);
+++  virtual ~BarrierSetAssembler() {}
+++};
+++
+++#endif // CPU_RISCV_GC_SHARED_BARRIERSETASSEMBLER_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp
++@@ -0,0 +1,171 @@
+++/*
+++ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#include "precompiled.hpp"
+++#include "code/codeCache.hpp"
+++#include "code/nativeInst.hpp"
+++#include "gc/shared/barrierSetNMethod.hpp"
+++#include "logging/log.hpp"
+++#include "memory/resourceArea.hpp"
+++#include "runtime/sharedRuntime.hpp"
+++#include "runtime/registerMap.hpp"
+++#include "runtime/thread.hpp"
+++#include "utilities/align.hpp"
+++#include "utilities/debug.hpp"
+++
+++class NativeNMethodBarrier: public NativeInstruction {
+++  address instruction_address() const { return addr_at(0); }
+++
+++  int *guard_addr() {
+++    /* auipc + lwu + fence + lwu + beq + lui + addi + slli + addi + slli + jalr + j */
+++    return reinterpret_cast<int*>(instruction_address() + 12 * 4);
+++  }
+++
+++public:
+++  int get_value() {
+++    return Atomic::load_acquire(guard_addr());
+++  }
+++
+++  void set_value(int value) {
+++    Atomic::release_store(guard_addr(), value);
+++  }
+++
+++  void verify() const;
+++};
+++
+++// Store the instruction bitmask, bits and name for checking the barrier.
+++struct CheckInsn {
+++  uint32_t    mask;
+++  uint32_t    bits;
+++  const char *name;
+++};
+++
+++static const struct CheckInsn barrierInsn[] = {
+++  { 0x00000fff, 0x00000297, "auipc  t0, 0           "},
+++  { 0x000fffff, 0x0002e283, "lwu    t0, 48(t0)      "},
+++  { 0xffffffff, 0x0aa0000f, "fence  ir, ir          "},
+++  { 0x000fffff, 0x000be303, "lwu    t1, 112(xthread)"},
+++  { 0x01fff07f, 0x00628063, "beq    t0, t1, skip    "},
+++  { 0x00000fff, 0x000002b7, "lui    t0, imm0        "},
+++  { 0x000fffff, 0x00028293, "addi   t0, t0, imm1    "},
+++  { 0xffffffff, 0x00b29293, "slli   t0, t0, 11      "},
+++  { 0x000fffff, 0x00028293, "addi   t0, t0, imm2    "},
+++  { 0xffffffff, 0x00629293, "slli   t0, t0, 6       "},
+++  { 0x000fffff, 0x000280e7, "jalr   ra, imm3(t0)    "},
+++  { 0x00000fff, 0x0000006f, "j      skip            "}
+++  /* guard: */
+++  /* 32bit nmethod guard value */
+++  /* skip: */
+++};
+++
+++// The encodings must match the instructions emitted by
+++// BarrierSetAssembler::nmethod_entry_barrier. The matching ignores the specific
+++// register numbers and immediate values in the encoding.
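+++// Worked example (illustrative): the first entry accepts any `auipc t0, imm`.
+++// An auipc encoding keeps rd in bits 11:7 and the opcode 0x17 in bits 6:0,
+++// so masking with 0x00000fff and comparing against (5 << 7) | 0x17 == 0x297
+++// pins rd = x5 (t0) while leaving the 20-bit immediate unconstrained.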
+++void NativeNMethodBarrier::verify() const {
+++  intptr_t addr = (intptr_t) instruction_address();
+++  for (unsigned int i = 0; i < sizeof(barrierInsn) / sizeof(struct CheckInsn); i++) {
+++    uint32_t inst = *((uint32_t*) addr);
+++    if ((inst & barrierInsn[i].mask) != barrierInsn[i].bits) {
+++      tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", addr, inst);
+++      fatal("not an %s instruction.", barrierInsn[i].name);
+++    }
+++    addr += 4;
+++  }
+++}
+++
+++
+++/* We're called from an nmethod when we need to deoptimize it. We do
+++   this by throwing away the nmethod's frame and jumping to the
+++   ic_miss stub. This looks like there has been an IC miss at the
+++   entry of the nmethod, so we resolve the call, which will fall back
+++   to the interpreter if the nmethod has been unloaded. */
+++void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
+++
+++  typedef struct {
+++    intptr_t *sp; intptr_t *fp; address ra; address pc;
+++  } frame_pointers_t;
+++
+++  frame_pointers_t *new_frame = (frame_pointers_t *)(return_address_ptr - 5);
+++
+++  JavaThread *thread = JavaThread::current();
+++  RegisterMap reg_map(thread, false);
+++  frame frame = thread->last_frame();
+++
+++  assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
+++  assert(frame.cb() == nm, "must be");
+++  frame = frame.sender(&reg_map);
+++
+++  LogTarget(Trace, nmethod, barrier) out;
+++  if (out.is_enabled()) {
+++    ResourceMark mark;
+++    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
+++                                nm->method()->name_and_sig_as_C_string(),
+++                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
+++                                thread->name(), frame.sp(), nm->verified_entry_point());
+++  }
+++
+++  new_frame->sp = frame.sp();
+++  new_frame->fp = frame.fp();
+++  new_frame->ra = frame.pc();
+++  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
+++}
+++
+++// This is the offset of the entry barrier from where the frame is completed.
+++// If any code changes between the end of the verified entry where the entry
+++// barrier resides, and the completion of the frame, then
+++// NativeNMethodCmpBarrier::verify() will immediately complain when it does
+++// not find the expected native instruction at this offset, which needs updating.
+++// Note that this offset is invariant of PreserveFramePointer.
+++
+++// see BarrierSetAssembler::nmethod_entry_barrier
+++// auipc + lwu + fence + lwu + beq + movptr(5 instructions) + jalr + j + int32
+++static const int entry_barrier_offset = -4 * 13;
+++
+++static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) {
+++  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset;
+++  NativeNMethodBarrier* barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address);
+++  debug_only(barrier->verify());
+++  return barrier;
+++}
+++
+++void BarrierSetNMethod::disarm(nmethod* nm) {
+++  if (!supports_entry_barrier(nm)) {
+++    return;
+++  }
+++
+++  // Disarms the nmethod guard emitted by BarrierSetAssembler::nmethod_entry_barrier.
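+++  // (Illustrative: the guard is "armed" whenever its value differs from
+++  // the global disarmed_value(); armed nmethods take the entry-barrier
+++  // slow path. The release store in set_value() pairs with the
+++  // load-acquire in get_value() and with the fence in the emitted
+++  // barrier code.)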
+++ NativeNMethodBarrier* barrier = native_nmethod_barrier(nm); +++ +++ barrier->set_value(disarmed_value()); +++} +++ +++bool BarrierSetNMethod::is_armed(nmethod* nm) { +++ if (!supports_entry_barrier(nm)) { +++ return false; +++ } +++ +++ NativeNMethodBarrier* barrier = native_nmethod_barrier(nm); +++ return barrier->get_value() != disarmed_value(); +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.cpp ++@@ -0,0 +1,107 @@ +++/* +++ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.inline.hpp" +++#include "gc/shared/barrierSet.hpp" +++#include "gc/shared/cardTable.hpp" +++#include "gc/shared/cardTableBarrierSet.hpp" +++#include "gc/shared/cardTableBarrierSetAssembler.hpp" +++#include "gc/shared/gc_globals.hpp" +++#include "interpreter/interp_masm.hpp" +++ +++#define __ masm-> +++ +++ +++void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Register tmp) { +++ assert_different_registers(obj, tmp); +++ BarrierSet* bs = BarrierSet::barrier_set(); +++ assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind"); +++ +++ __ srli(obj, obj, CardTable::card_shift); +++ +++ assert(CardTable::dirty_card_val() == 0, "must be"); +++ +++ __ load_byte_map_base(tmp); +++ __ add(tmp, obj, tmp); +++ +++ if (UseCondCardMark) { +++ Label L_already_dirty; +++ __ lbu(t1, Address(tmp)); +++ __ beqz(t1, L_already_dirty); +++ __ sb(zr, Address(tmp)); +++ __ bind(L_already_dirty); +++ } else { +++ __ sb(zr, Address(tmp)); +++ } +++} +++ +++void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, +++ Register start, Register count, Register tmp, RegSet saved_regs) { +++ assert_different_registers(start, tmp); +++ assert_different_registers(count, tmp); +++ +++ Label L_loop, L_done; +++ const Register end = count; +++ +++ __ beqz(count, L_done); // zero count - nothing to do +++ // end = start + count << LogBytesPerHeapOop +++ __ shadd(end, count, start, count, LogBytesPerHeapOop); +++ __ sub(end, end, BytesPerHeapOop); // last element address to make inclusive +++ +++ __ srli(start, start, CardTable::card_shift); +++ __ srli(end, end, CardTable::card_shift); +++ __ sub(count, end, start); // number of bytes to copy +++ +++ __ load_byte_map_base(tmp); +++ __ add(start, 
start, tmp); +++ +++ __ bind(L_loop); +++ __ add(tmp, start, count); +++ __ sb(zr, Address(tmp)); +++ __ sub(count, count, 1); +++ __ bgez(count, L_loop); +++ __ bind(L_done); +++} +++ +++void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, +++ Address dst, Register val, Register tmp1, Register tmp2) { +++ bool in_heap = (decorators & IN_HEAP) != 0; +++ bool is_array = (decorators & IS_ARRAY) != 0; +++ bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0; +++ bool precise = is_array || on_anonymous; +++ +++ bool needs_post_barrier = val != noreg && in_heap; +++ BarrierSetAssembler::store_at(masm, decorators, type, dst, val, noreg, noreg); +++ if (needs_post_barrier) { +++ // flatten object address if needed +++ if (!precise || dst.offset() == 0) { +++ store_check(masm, dst.base(), x13); +++ } else { +++ __ la(x13, dst); +++ store_check(masm, x13, t0); +++ } +++ } +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.hpp ++@@ -0,0 +1,42 @@ +++/* +++ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_RISCV_HPP +++#define CPU_RISCV_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_RISCV_HPP +++ +++#include "asm/macroAssembler.hpp" +++#include "gc/shared/modRefBarrierSetAssembler.hpp" +++ +++class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler { +++protected: +++ void store_check(MacroAssembler* masm, Register obj, Register tmp); +++ +++ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, +++ Register start, Register count, Register tmp, RegSet saved_regs); +++ virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, +++ Address dst, Register val, Register tmp1, Register tmp2); +++}; +++ +++#endif // #ifndef CPU_RISCV_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/shared/modRefBarrierSetAssembler_riscv.cpp ++@@ -0,0 +1,54 @@ +++/* +++ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.inline.hpp" +++#include "gc/shared/modRefBarrierSetAssembler.hpp" +++ +++#define __ masm-> +++ +++void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, +++ Register src, Register dst, Register count, RegSet saved_regs) { +++ if (is_oop) { +++ gen_write_ref_array_pre_barrier(masm, decorators, dst, count, saved_regs); +++ } +++} +++ +++void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, +++ Register start, Register count, Register tmp, +++ RegSet saved_regs) { +++ if (is_oop) { +++ gen_write_ref_array_post_barrier(masm, decorators, start, count, tmp, saved_regs); +++ } +++} +++ +++void ModRefBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, +++ Address dst, Register val, Register tmp1, Register tmp2) { +++ if (is_reference_type(type)) { +++ oop_store_at(masm, decorators, type, dst, val, tmp1, tmp2); +++ } else { +++ BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2); +++ } +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/shared/modRefBarrierSetAssembler_riscv.hpp ++@@ -0,0 +1,55 @@ +++/* +++ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#ifndef CPU_RISCV_GC_SHARED_MODREFBARRIERSETASSEMBLER_RISCV_HPP +++#define CPU_RISCV_GC_SHARED_MODREFBARRIERSETASSEMBLER_RISCV_HPP +++ +++#include "asm/macroAssembler.hpp" +++#include "gc/shared/barrierSetAssembler.hpp" +++ +++// The ModRefBarrierSetAssembler filters away accesses on BasicTypes other +++// than T_OBJECT/T_ARRAY (oops). The oop accesses call one of the protected +++// accesses, which are overridden in the concrete BarrierSetAssembler. +++ +++class ModRefBarrierSetAssembler: public BarrierSetAssembler { +++protected: +++ virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, +++ Register addr, Register count, RegSet saved_regs) {} +++ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, +++ Register start, Register count, Register tmp, RegSet saved_regs) {} +++ +++ virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, +++ Address dst, Register val, Register tmp1, Register tmp2) = 0; +++ +++public: +++ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, +++ Register src, Register dst, Register count, RegSet saved_regs); +++ virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, +++ Register start, Register count, Register tmp, RegSet saved_regs); +++ virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, +++ Address dst, Register val, Register tmp1, Register tmp2); +++}; +++ +++#endif // CPU_RISCV_GC_SHARED_MODREFBARRIERSETASSEMBLER_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/shenandoah/c1/shenandoahBarrierSetC1_riscv.cpp ++@@ -0,0 +1,117 @@ +++/* +++ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "c1/c1_LIRAssembler.hpp" +++#include "c1/c1_MacroAssembler.hpp" +++#include "gc/shared/gc_globals.hpp" +++#include "gc/shenandoah/shenandoahBarrierSet.hpp" +++#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" +++#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp" +++ +++#define __ masm->masm()-> +++ +++void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) { +++ Register addr = _addr->as_register_lo(); +++ Register newval = _new_value->as_register(); +++ Register cmpval = _cmp_value->as_register(); +++ Register tmp1 = _tmp1->as_register(); +++ Register tmp2 = _tmp2->as_register(); +++ Register result = result_opr()->as_register(); +++ +++ ShenandoahBarrierSet::assembler()->iu_barrier(masm->masm(), newval, t1); +++ +++ if (UseCompressedOops) { +++ __ encode_heap_oop(tmp1, cmpval); +++ cmpval = tmp1; +++ __ encode_heap_oop(tmp2, newval); +++ newval = tmp2; +++ } +++ +++ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /* acquire */ Assembler::aq, +++ /* release */ Assembler::rl, /* is_cae */ false, result); +++} +++ +++#undef __ +++ +++#ifdef ASSERT +++#define __ gen->lir(__FILE__, __LINE__)-> +++#else +++#define __ gen->lir()-> +++#endif +++ +++LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) { +++ BasicType bt = access.type(); +++ if (access.is_oop()) { +++ LIRGenerator *gen = access.gen(); +++ if (ShenandoahSATBBarrier) { +++ pre_barrier(gen, access.access_emit_info(), access.decorators(), access.resolved_addr(), +++ LIR_OprFact::illegalOpr /* pre_val */); +++ } +++ if (ShenandoahCASBarrier) { +++ cmp_value.load_item(); +++ new_value.load_item(); +++ +++ LIR_Opr tmp1 = gen->new_register(T_OBJECT); +++ LIR_Opr tmp2 = gen->new_register(T_OBJECT); +++ LIR_Opr addr = access.resolved_addr()->as_address_ptr()->base(); +++ LIR_Opr result = gen->new_register(T_INT); +++ +++ __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), tmp1, tmp2, result)); +++ return result; +++ } +++ } +++ return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value); +++} +++ +++LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) { +++ LIRGenerator* gen = access.gen(); +++ BasicType type = access.type(); +++ +++ LIR_Opr result = gen->new_register(type); +++ value.load_item(); +++ LIR_Opr value_opr = value.result(); +++ +++ if (access.is_oop()) { +++ value_opr = iu_barrier(access.gen(), value_opr, access.access_emit_info(), access.decorators()); +++ } +++ +++ assert(type == T_INT || is_reference_type(type) LP64_ONLY( || type == T_LONG ), "unexpected type"); +++ LIR_Opr tmp = gen->new_register(T_INT); +++ __ xchg(access.resolved_addr(), value_opr, result, tmp); +++ +++ if (access.is_oop()) { +++ result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0), access.decorators()); +++ LIR_Opr tmp_opr = gen->new_register(type); +++ __ move(result, tmp_opr); +++ result = tmp_opr; +++ if (ShenandoahSATBBarrier) { +++ pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr, +++ result /* pre_val */); +++ } +++ } +++ +++ return result; +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp ++@@ -0,0 +1,714 @@ +++/* +++ * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. 
All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "gc/shenandoah/shenandoahBarrierSet.hpp" +++#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" +++#include "gc/shenandoah/shenandoahForwarding.hpp" +++#include "gc/shenandoah/shenandoahHeap.inline.hpp" +++#include "gc/shenandoah/shenandoahHeapRegion.hpp" +++#include "gc/shenandoah/shenandoahRuntime.hpp" +++#include "gc/shenandoah/shenandoahThreadLocalData.hpp" +++#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +++#include "interpreter/interpreter.hpp" +++#include "interpreter/interp_masm.hpp" +++#include "runtime/sharedRuntime.hpp" +++#include "runtime/thread.hpp" +++#ifdef COMPILER1 +++#include "c1/c1_LIRAssembler.hpp" +++#include "c1/c1_MacroAssembler.hpp" +++#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp" +++#endif +++ +++#define __ masm-> +++ +++void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, +++ Register src, Register dst, Register count, RegSet saved_regs) { +++ if (is_oop) { +++ bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; +++ if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahIUBarrier || ShenandoahLoadRefBarrier) { +++ +++ Label done; +++ +++ // Avoid calling runtime if count == 0 +++ __ beqz(count, done); +++ +++ // Is GC active? 
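+++ // gc_state is a per-thread copy of the collector's phase bitmask; the
+++ // barrier body below is skipped unless a relevant bit (HAS_FORWARDED,
+++ // MARKING) is set.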
+++ Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+++ assert_different_registers(src, dst, count, t0);
+++
+++ __ lbu(t0, gc_state);
+++ if (ShenandoahSATBBarrier && dest_uninitialized) {
+++ __ test_bit(t0, t0, ShenandoahHeap::HAS_FORWARDED_BITPOS);
+++ __ beqz(t0, done);
+++ } else {
+++ __ andi(t0, t0, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
+++ __ beqz(t0, done);
+++ }
+++
+++ __ push_reg(saved_regs, sp);
+++ if (UseCompressedOops) {
+++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry),
+++ src, dst, count);
+++ } else {
+++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), src, dst, count);
+++ }
+++ __ pop_reg(saved_regs, sp);
+++ __ bind(done);
+++ }
+++ }
+++}
+++
+++void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
+++ Register obj,
+++ Register pre_val,
+++ Register thread,
+++ Register tmp,
+++ bool tosca_live,
+++ bool expand_call) {
+++ if (ShenandoahSATBBarrier) {
+++ satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
+++ }
+++}
+++
+++void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
+++ Register obj,
+++ Register pre_val,
+++ Register thread,
+++ Register tmp,
+++ bool tosca_live,
+++ bool expand_call) {
+++ // If expand_call is true then we expand the call_VM_leaf macro
+++ // directly to skip generating the check by
+++ // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
+++ assert(thread == xthread, "must be");
+++
+++ Label done;
+++ Label runtime;
+++
+++ assert_different_registers(obj, pre_val, tmp, t0);
+++ assert(pre_val != noreg && tmp != noreg, "expecting a register");
+++
+++ Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
+++ Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
+++ Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
+++
+++ // Is marking active?
+++ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+++ __ lwu(tmp, in_progress);
+++ } else {
+++ assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+++ __ lbu(tmp, in_progress);
+++ }
+++ __ beqz(tmp, done);
+++
+++ // Do we need to load the previous value?
+++ if (obj != noreg) {
+++ __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
+++ }
+++
+++ // Is the previous value null?
+++ __ beqz(pre_val, done);
+++
+++ // Can we store original value in the thread's buffer?
+++ // Is index == 0?
+++ // (The index field is typed as size_t.)
+++ __ ld(tmp, index); // tmp := *index_adr
+++ __ beqz(tmp, runtime); // tmp == 0? If yes, goto runtime
+++
+++ __ sub(tmp, tmp, wordSize); // tmp := tmp - wordSize
+++ __ sd(tmp, index); // *index_adr := tmp
+++ __ ld(t0, buffer);
+++ __ add(tmp, tmp, t0); // tmp := tmp + *buffer_adr
+++
+++ // Record the previous value
+++ __ sd(pre_val, Address(tmp, 0));
+++ __ j(done);
+++
+++ __ bind(runtime);
+++ // save the live input values
+++ RegSet saved = RegSet::of(pre_val);
+++ if (tosca_live) saved += RegSet::of(x10);
+++ if (obj != noreg) saved += RegSet::of(obj);
+++
+++ __ push_reg(saved, sp);
+++
+++ // Calling the runtime using the regular call_VM_leaf mechanism generates
+++ // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
+++ // that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
+++ //
+++ // If we are generating the pre-barrier without a frame (e.g. in the
+++ // intrinsified Reference.get() routine) then the frame pointer might be
+++ // pointing to the caller frame and so this check will most likely fail
+++ // at runtime.
+++ //
+++ // Expanding the call directly bypasses the generation of the check.
+++ // So when we do not have a full interpreter frame on the stack
+++ // expand_call should be passed true.
+++ if (expand_call) {
+++ assert(pre_val != c_rarg1, "smashed arg");
+++ __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
+++ } else {
+++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
+++ }
+++
+++ __ pop_reg(saved, sp);
+++
+++ __ bind(done);
+++}
+++
+++void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
+++ assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
+++
+++ Label is_null;
+++ __ beqz(dst, is_null);
+++ resolve_forward_pointer_not_null(masm, dst, tmp);
+++ __ bind(is_null);
+++}
+++
+++// IMPORTANT: This must preserve all registers, even t0 and t1, except those explicitly
+++// passed in.
+++void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
+++ assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
+++ // The below loads the mark word, checks if the lowest two bits are
+++ // set, and if so, clears the lowest two bits and copies the result
+++ // to dst. Otherwise it leaves dst alone.
+++ // Implementing this is surprisingly awkward. I do it here by:
+++ // - Inverting the mark word
+++ // - Test lowest two bits == 0
+++ // - If so, set the lowest two bits
+++ // - Invert the result back, and copy to dst
+++ RegSet saved_regs = RegSet::of(t2);
+++ bool borrow_reg = (tmp == noreg);
+++ if (borrow_reg) {
+++ // No free registers available. Make one useful.
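+++ // Borrow t0 (or t1 when dst is already t0); the borrowed register is
+++ // added to saved_regs so it is preserved across the push/pop below.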
+++ tmp = t0;
+++ if (tmp == dst) {
+++ tmp = t1;
+++ }
+++ saved_regs += RegSet::of(tmp);
+++ }
+++
+++ assert_different_registers(tmp, dst, t2);
+++ __ push_reg(saved_regs, sp);
+++
+++ Label done;
+++ __ ld(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
+++ __ xori(tmp, tmp, -1); // eon with 0 is equivalent to XOR with -1
+++ __ andi(t2, tmp, markWord::lock_mask_in_place);
+++ __ bnez(t2, done);
+++ __ ori(tmp, tmp, markWord::marked_value);
+++ __ xori(dst, tmp, -1); // eon with 0 is equivalent to XOR with -1
+++ __ bind(done);
+++
+++ __ pop_reg(saved_regs, sp);
+++}
+++
+++void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
+++ Register dst,
+++ Address load_addr,
+++ DecoratorSet decorators) {
+++ assert(ShenandoahLoadRefBarrier, "Should be enabled");
+++ assert(dst != t1 && load_addr.base() != t1, "need t1");
+++ assert_different_registers(load_addr.base(), t0, t1);
+++
+++ bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
+++ bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
+++ bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
+++ bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
+++ bool is_narrow = UseCompressedOops && !is_native;
+++
+++ Label heap_stable, not_cset;
+++ __ enter();
+++ Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+++ __ lbu(t1, gc_state);
+++
+++ // Check for heap stability
+++ if (is_strong) {
+++ __ test_bit(t1, t1, ShenandoahHeap::HAS_FORWARDED_BITPOS);
+++ __ beqz(t1, heap_stable);
+++ } else {
+++ Label lrb;
+++ __ test_bit(t0, t1, ShenandoahHeap::WEAK_ROOTS_BITPOS);
+++ __ bnez(t0, lrb);
+++ __ test_bit(t0, t1, ShenandoahHeap::HAS_FORWARDED_BITPOS);
+++ __ beqz(t0, heap_stable);
+++ __ bind(lrb);
+++ }
+++
+++ // use x11 for load address
+++ Register result_dst = dst;
+++ if (dst == x11) {
+++ __ mv(t1, dst);
+++ dst = t1;
+++ }
+++
+++ // Save x10 and x11, unless one of them is the output register
+++ RegSet saved_regs = RegSet::of(x10, x11) - result_dst;
+++ __ push_reg(saved_regs, sp);
+++ __ la(x11, load_addr);
+++ __ mv(x10, dst);
+++
+++ // Test for in-cset
+++ if (is_strong) {
+++ __ mv(t1, ShenandoahHeap::in_cset_fast_test_addr());
+++ __ srli(t0, x10, ShenandoahHeapRegion::region_size_bytes_shift_jint());
+++ __ add(t1, t1, t0);
+++ __ lbu(t1, Address(t1));
+++ __ test_bit(t0, t1, 0);
+++ __ beqz(t0, not_cset);
+++ }
+++
+++ __ push_call_clobbered_registers();
+++ address target = NULL;
+++ if (is_strong) {
+++ if (is_narrow) {
+++ target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
+++ } else {
+++ target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
+++ }
+++ } else if (is_weak) {
+++ if (is_narrow) {
+++ target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
+++ } else {
+++ target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
+++ }
+++ } else {
+++ assert(is_phantom, "only remaining strength");
+++ assert(!is_narrow, "phantom access cannot be narrow");
+++ target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
+++ }
+++ __ call(target);
+++ __ mv(t0, x10);
+++ __ pop_call_clobbered_registers();
+++ __ mv(x10, t0);
+++ __ bind(not_cset);
+++ __ mv(result_dst, x10);
+++ __ pop_reg(saved_regs, sp);
+++
+++ __ bind(heap_stable);
+++ __ leave();
+++}
+++
+++void ShenandoahBarrierSetAssembler::iu_barrier(MacroAssembler* masm, Register dst,
Register tmp) { +++ if (ShenandoahIUBarrier) { +++ __ push_call_clobbered_registers(); +++ +++ satb_write_barrier_pre(masm, noreg, dst, xthread, tmp, true, false); +++ +++ __ pop_call_clobbered_registers(); +++ } +++} +++ +++// +++// Arguments: +++// +++// Inputs: +++// src: oop location to load from, might be clobbered +++// +++// Output: +++// dst: oop loaded from src location +++// +++// Kill: +++// x30 (tmp reg) +++// +++// Alias: +++// dst: x30 (might use x30 as temporary output register to avoid clobbering src) +++// +++void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, +++ DecoratorSet decorators, +++ BasicType type, +++ Register dst, +++ Address src, +++ Register tmp1, +++ Register tmp_thread) { +++ // 1: non-reference load, no additional barrier is needed +++ if (!is_reference_type(type)) { +++ BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); +++ return; +++ } +++ +++ // 2: load a reference from src location and apply LRB if needed +++ if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) { +++ Register result_dst = dst; +++ +++ // Preserve src location for LRB +++ RegSet saved_regs; +++ if (dst == src.base()) { +++ dst = (src.base() == x28) ? x29 : x28; +++ saved_regs = RegSet::of(dst); +++ __ push_reg(saved_regs, sp); +++ } +++ assert_different_registers(dst, src.base()); +++ +++ BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); +++ +++ load_reference_barrier(masm, dst, src, decorators); +++ +++ if (dst != result_dst) { +++ __ mv(result_dst, dst); +++ dst = result_dst; +++ } +++ +++ if (saved_regs.bits() != 0) { +++ __ pop_reg(saved_regs, sp); +++ } +++ } else { +++ BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); +++ } +++ +++ // 3: apply keep-alive barrier if needed +++ if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) { +++ __ enter(); +++ __ push_call_clobbered_registers(); +++ satb_write_barrier_pre(masm /* masm */, +++ noreg /* obj */, +++ dst /* pre_val */, +++ xthread /* thread */, +++ tmp1 /* tmp */, +++ true /* tosca_live */, +++ true /* expand_call */); +++ __ pop_call_clobbered_registers(); +++ __ leave(); +++ } +++} +++ +++void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, +++ Address dst, Register val, Register tmp1, Register tmp2) { +++ bool on_oop = is_reference_type(type); +++ if (!on_oop) { +++ BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2); +++ return; +++ } +++ +++ // flatten object address if needed +++ if (dst.offset() == 0) { +++ if (dst.base() != x13) { +++ __ mv(x13, dst.base()); +++ } +++ } else { +++ __ la(x13, dst); +++ } +++ +++ shenandoah_write_barrier_pre(masm, +++ x13 /* obj */, +++ tmp2 /* pre_val */, +++ xthread /* thread */, +++ tmp1 /* tmp */, +++ val != noreg /* tosca_live */, +++ false /* expand_call */); +++ +++ if (val == noreg) { +++ BarrierSetAssembler::store_at(masm, decorators, type, Address(x13, 0), noreg, noreg, noreg); +++ } else { +++ iu_barrier(masm, val, tmp1); +++ // G1 barrier needs uncompressed oop for region cross check. 
+++ Register new_val = val;
+++ if (UseCompressedOops) {
+++ new_val = t1;
+++ __ mv(new_val, val);
+++ }
+++ BarrierSetAssembler::store_at(masm, decorators, type, Address(x13, 0), val, noreg, noreg);
+++ }
+++}
+++
+++void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
+++ Register obj, Register tmp, Label& slowpath) {
+++ Label done;
+++ // Resolve jobject
+++ BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
+++
+++ // Check for null.
+++ __ beqz(obj, done);
+++
+++ assert(obj != t1, "need t1");
+++ Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
+++ __ lbu(t1, gc_state);
+++
+++ // Check for heap in evacuation phase
+++ __ test_bit(t0, t1, ShenandoahHeap::EVACUATION_BITPOS);
+++ __ bnez(t0, slowpath);
+++
+++ __ bind(done);
+++}
+++
+++// Special Shenandoah CAS implementation that handles false negatives due
+++// to concurrent evacuation. The service is more complex than a
+++// traditional CAS operation because the CAS operation is intended to
+++// succeed if the reference at addr exactly matches expected or if the
+++// reference at addr holds a pointer to a from-space object that has
+++// been relocated to the location named by expected. There are two
+++// races that must be addressed:
+++// a) A parallel thread may mutate the contents of addr so that it points
+++// to a different object. In this case, the CAS operation should fail.
+++// b) A parallel thread may heal the contents of addr, replacing a
+++// from-space pointer held in addr with the to-space pointer
+++// representing the new location of the object.
+++// Upon entry to cmpxchg_oop, it is assured that new_val equals NULL
+++// or it refers to an object that is not being evacuated out of
+++// from-space, or it refers to the to-space version of an object that
+++// is being evacuated out of from-space.
+++//
+++// By default the value held in the result register following execution
+++// of the generated code sequence is 0 to indicate failure of CAS,
+++// non-zero to indicate success. If is_cae, the result is the value most
+++// recently fetched from addr rather than a boolean success indicator.
+++//
+++// Clobbers t0, t1
+++void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
+++ Register addr,
+++ Register expected,
+++ Register new_val,
+++ Assembler::Aqrl acquire,
+++ Assembler::Aqrl release,
+++ bool is_cae,
+++ Register result) {
+++ bool is_narrow = UseCompressedOops;
+++ Assembler::operand_size size = is_narrow ? Assembler::uint32 : Assembler::int64;
+++
+++ assert_different_registers(addr, expected, t0, t1);
+++ assert_different_registers(addr, new_val, t0, t1);
+++
+++ Label retry, success, fail, done;
+++
+++ __ bind(retry);
+++
+++ // Step 1: Try to CAS.
+++ __ cmpxchg(addr, expected, new_val, size, acquire, release, /* result */ t1);
+++
+++ // If successful, then we are done.
+++ __ beq(expected, t1, success);
+++
+++ // Step 2: CAS failed, check the forwarded pointer.
+++ __ mv(t0, t1);
+++
+++ if (is_narrow) {
+++ __ decode_heap_oop(t0, t0);
+++ }
+++ resolve_forward_pointer(masm, t0);
+++
+++ __ encode_heap_oop(t0, t0);
+++
+++ // Report failure when the forwarded oop was not expected.
+++ __ bne(t0, expected, fail);
+++
+++ // Step 3: CAS again using the forwarded oop.
+++ __ cmpxchg(addr, t1, new_val, size, acquire, release, /* result */ t0);
+++
+++ // Retry when failed.
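+++ // t0 holds the value observed by the second CAS; if it no longer equals
+++ // t1 (the value seen by the first CAS), the location has changed again
+++ // and the whole sequence starts over.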
+++ __ bne(t0, t1, retry); +++ +++ __ bind(success); +++ if (is_cae) { +++ __ mv(result, expected); +++ } else { +++ __ mv(result, 1); +++ } +++ __ j(done); +++ +++ __ bind(fail); +++ if (is_cae) { +++ __ mv(result, t0); +++ } else { +++ __ mv(result, zr); +++ } +++ +++ __ bind(done); +++} +++ +++#undef __ +++ +++#ifdef COMPILER1 +++ +++#define __ ce->masm()-> +++ +++void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) { +++ ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1(); +++ // At this point we know that marking is in progress. +++ // If do_load() is true then we have to emit the +++ // load of the previous value; otherwise it has already +++ // been loaded into _pre_val. +++ __ bind(*stub->entry()); +++ +++ assert(stub->pre_val()->is_register(), "Precondition."); +++ +++ Register pre_val_reg = stub->pre_val()->as_register(); +++ +++ if (stub->do_load()) { +++ ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /* wide */, false /* unaligned */); +++ } +++ __ beqz(pre_val_reg, *stub->continuation(), /* is_far */ true); +++ ce->store_parameter(stub->pre_val()->as_register(), 0); +++ __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin())); +++ __ j(*stub->continuation()); +++} +++ +++void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, +++ ShenandoahLoadReferenceBarrierStub* stub) { +++ ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1(); +++ __ bind(*stub->entry()); +++ +++ DecoratorSet decorators = stub->decorators(); +++ bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators); +++ bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators); +++ bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators); +++ bool is_native = ShenandoahBarrierSet::is_native_access(decorators); +++ +++ Register obj = stub->obj()->as_register(); +++ Register res = stub->result()->as_register(); +++ Register addr = stub->addr()->as_pointer_register(); +++ Register tmp1 = stub->tmp1()->as_register(); +++ Register tmp2 = stub->tmp2()->as_register(); +++ +++ assert(res == x10, "result must arrive in x10"); +++ assert_different_registers(tmp1, tmp2, t0); +++ +++ if (res != obj) { +++ __ mv(res, obj); +++ } +++ +++ if (is_strong) { +++ // Check for object in cset. 
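+++ // The cset fast-test table is a byte map indexed by heap-region number
+++ // (address >> region-size shift); a non-zero byte means the object's
+++ // region is in the collection set.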
+++ __ mv(tmp2, ShenandoahHeap::in_cset_fast_test_addr()); +++ __ srli(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint()); +++ __ add(tmp2, tmp2, tmp1); +++ __ lbu(tmp2, Address(tmp2)); +++ __ beqz(tmp2, *stub->continuation(), true /* is_far */); +++ } +++ +++ ce->store_parameter(res, 0); +++ ce->store_parameter(addr, 1); +++ +++ if (is_strong) { +++ if (is_native) { +++ __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin())); +++ } else { +++ __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin())); +++ } +++ } else if (is_weak) { +++ __ far_call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin())); +++ } else { +++ assert(is_phantom, "only remaining strength"); +++ __ far_call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin())); +++ } +++ +++ __ j(*stub->continuation()); +++} +++ +++#undef __ +++ +++#define __ sasm-> +++ +++void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) { +++ __ prologue("shenandoah_pre_barrier", false); +++ +++ // arg0 : previous value of memory +++ +++ BarrierSet* bs = BarrierSet::barrier_set(); +++ +++ const Register pre_val = x10; +++ const Register thread = xthread; +++ const Register tmp = t0; +++ +++ Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset())); +++ Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())); +++ +++ Label done; +++ Label runtime; +++ +++ // Is marking still active? +++ Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); +++ __ lb(tmp, gc_state); +++ __ test_bit(tmp, tmp, ShenandoahHeap::MARKING_BITPOS); +++ __ beqz(tmp, done); +++ +++ // Can we store original value in the thread's buffer? 
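+++ // A queue index of zero means the thread's SATB buffer is full, so the
+++ // record has to go through the runtime call instead.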
+++ __ ld(tmp, queue_index); +++ __ beqz(tmp, runtime); +++ +++ __ sub(tmp, tmp, wordSize); +++ __ sd(tmp, queue_index); +++ __ ld(t1, buffer); +++ __ add(tmp, tmp, t1); +++ __ load_parameter(0, t1); +++ __ sd(t1, Address(tmp, 0)); +++ __ j(done); +++ +++ __ bind(runtime); +++ __ push_call_clobbered_registers(); +++ __ load_parameter(0, pre_val); +++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread); +++ __ pop_call_clobbered_registers(); +++ __ bind(done); +++ +++ __ epilogue(); +++} +++ +++void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, +++ DecoratorSet decorators) { +++ __ prologue("shenandoah_load_reference_barrier", false); +++ // arg0 : object to be resolved +++ +++ __ push_call_clobbered_registers(); +++ __ load_parameter(0, x10); +++ __ load_parameter(1, x11); +++ +++ bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators); +++ bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators); +++ bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators); +++ bool is_native = ShenandoahBarrierSet::is_native_access(decorators); +++ address target = NULL; +++ if (is_strong) { +++ if (is_native) { +++ target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong); +++ } else { +++ if (UseCompressedOops) { +++ target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow); +++ } else { +++ target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong); +++ } +++ } +++ } else if (is_weak) { +++ assert(!is_native, "weak must not be called off-heap"); +++ if (UseCompressedOops) { +++ target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow); +++ } else { +++ target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak); +++ } +++ } else { +++ assert(is_phantom, "only remaining strength"); +++ assert(is_native, "phantom must only be called off-heap"); +++ target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom); +++ } +++ __ call(target); +++ __ mv(t0, x10); +++ __ pop_call_clobbered_registers(); +++ __ mv(x10, t0); +++ +++ __ epilogue(); +++} +++ +++#undef __ +++ +++#endif // COMPILER1 ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.hpp ++@@ -0,0 +1,88 @@ +++/* +++ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_RISCV_HPP +++#define CPU_RISCV_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_RISCV_HPP +++ +++#include "asm/macroAssembler.hpp" +++#include "gc/shared/barrierSetAssembler.hpp" +++#include "gc/shenandoah/shenandoahBarrierSet.hpp" +++#ifdef COMPILER1 +++class LIR_Assembler; +++class ShenandoahPreBarrierStub; +++class ShenandoahLoadReferenceBarrierStub; +++class StubAssembler; +++#endif +++class StubCodeGenerator; +++ +++class ShenandoahBarrierSetAssembler: public BarrierSetAssembler { +++private: +++ +++ void satb_write_barrier_pre(MacroAssembler* masm, +++ Register obj, +++ Register pre_val, +++ Register thread, +++ Register tmp, +++ bool tosca_live, +++ bool expand_call); +++ void shenandoah_write_barrier_pre(MacroAssembler* masm, +++ Register obj, +++ Register pre_val, +++ Register thread, +++ Register tmp, +++ bool tosca_live, +++ bool expand_call); +++ +++ void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg); +++ void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg); +++ void load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr, DecoratorSet decorators); +++ +++public: +++ +++ void iu_barrier(MacroAssembler* masm, Register dst, Register tmp); +++ +++#ifdef COMPILER1 +++ void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub); +++ void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub); +++ void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm); +++ void generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators); +++#endif +++ +++ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, +++ Register src, Register dst, Register count, RegSet saved_regs); +++ +++ virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, +++ Register dst, Address src, Register tmp1, Register tmp_thread); +++ virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, +++ Address dst, Register val, Register tmp1, Register tmp2); +++ +++ virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env, +++ Register obj, Register tmp, Label& slowpath); +++ +++ void cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val, +++ Assembler::Aqrl acquire, Assembler::Aqrl release, bool is_cae, Register result); +++}; +++ +++#endif // CPU_RISCV_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoah_riscv64.ad ++@@ -0,0 +1,285 @@ +++// +++// Copyright (c) 2018, Red Hat, Inc. All rights reserved. +++// Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++// +++// This code is free software; you can redistribute it and/or modify it +++// under the terms of the GNU General Public License version 2 only, as +++// published by the Free Software Foundation. +++// +++// This code is distributed in the hope that it will be useful, but WITHOUT +++// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++// FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +++// version 2 for more details (a copy is included in the LICENSE file that +++// accompanied this code). +++// +++// You should have received a copy of the GNU General Public License version +++// 2 along with this work; if not, write to the Free Software Foundation, +++// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++// +++// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++// or visit www.oracle.com if you need additional information or have any +++// questions. +++// +++// +++ +++source_hpp %{ +++#include "gc/shenandoah/shenandoahBarrierSet.hpp" +++#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" +++%} +++ +++instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{ +++ match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval))); +++ ins_cost(10 * DEFAULT_COST); +++ +++ effect(TEMP tmp, KILL cr); +++ +++ format %{ +++ "cmpxchg_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp, #@compareAndSwapP_shenandoah" +++ %} +++ +++ ins_encode %{ +++ Register tmp = $tmp$$Register; +++ __ mv(tmp, $oldval$$Register); // Must not clobber oldval. +++ ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, +++ Assembler::relaxed /* acquire */, Assembler::rl /* release */, +++ false /* is_cae */, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{ +++ match(Set res (ShenandoahCompareAndSwapN mem (Binary oldval newval))); +++ ins_cost(10 * DEFAULT_COST); +++ +++ effect(TEMP tmp, KILL cr); +++ +++ format %{ +++ "cmpxchgw_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp, #@compareAndSwapN_shenandoah" +++ %} +++ +++ ins_encode %{ +++ Register tmp = $tmp$$Register; +++ __ mv(tmp, $oldval$$Register); // Must not clobber oldval. +++ ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, +++ Assembler::relaxed /* acquire */, Assembler::rl /* release */, +++ false /* is_cae */, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{ +++ predicate(needs_acquiring_load_reserved(n)); +++ match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval))); +++ ins_cost(10 * DEFAULT_COST); +++ +++ effect(TEMP tmp, KILL cr); +++ +++ format %{ +++ "cmpxchg_acq_shenandoah_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp, #@compareAndSwapPAcq_shenandoah" +++ %} +++ +++ ins_encode %{ +++ Register tmp = $tmp$$Register; +++ __ mv(tmp, $oldval$$Register); // Must not clobber oldval. 
+++ ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, +++ Assembler::aq /* acquire */, Assembler::rl /* release */, +++ false /* is_cae */, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{ +++ predicate(needs_acquiring_load_reserved(n)); +++ match(Set res (ShenandoahCompareAndSwapN mem (Binary oldval newval))); +++ ins_cost(10 * DEFAULT_COST); +++ +++ effect(TEMP tmp, KILL cr); +++ +++ format %{ +++ "cmpxchgw_acq_shenandoah_narrow_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp, #@compareAndSwapNAcq_shenandoah" +++ %} +++ +++ ins_encode %{ +++ Register tmp = $tmp$$Register; +++ __ mv(tmp, $oldval$$Register); // Must not clobber oldval. +++ ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, +++ Assembler::aq /* acquire */, Assembler::rl /* release */, +++ false /* is_cae */, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndExchangeN_shenandoah(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{ +++ match(Set res (ShenandoahCompareAndExchangeN mem (Binary oldval newval))); +++ ins_cost(10 * DEFAULT_COST); +++ effect(TEMP_DEF res, TEMP tmp, KILL cr); +++ +++ format %{ +++ "cmpxchgw_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeN_shenandoah" +++ %} +++ +++ ins_encode %{ +++ Register tmp = $tmp$$Register; +++ __ mv(tmp, $oldval$$Register); // Must not clobber oldval. +++ ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, +++ Assembler::relaxed /* acquire */, Assembler::rl /* release */, +++ true /* is_cae */, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndExchangeP_shenandoah(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{ +++ match(Set res (ShenandoahCompareAndExchangeP mem (Binary oldval newval))); +++ ins_cost(10 * DEFAULT_COST); +++ +++ effect(TEMP_DEF res, TEMP tmp, KILL cr); +++ format %{ +++ "cmpxchg_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp, #@compareAndExchangeP_shenandoah" +++ %} +++ +++ ins_encode %{ +++ Register tmp = $tmp$$Register; +++ __ mv(tmp, $oldval$$Register); // Must not clobber oldval. +++ ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, +++ Assembler::relaxed /* acquire */, Assembler::rl /* release */, +++ true /* is_cae */, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct weakCompareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{ +++ match(Set res (ShenandoahWeakCompareAndSwapN mem (Binary oldval newval))); +++ ins_cost(10 * DEFAULT_COST); +++ +++ effect(TEMP tmp, KILL cr); +++ format %{ +++ "cmpxchgw_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@weakCompareAndSwapN_shenandoah" +++ "mv $res, EQ\t# $res <-- (EQ ? 1 : 0)" +++ %} +++ +++ ins_encode %{ +++ Register tmp = $tmp$$Register; +++ __ mv(tmp, $oldval$$Register); // Must not clobber oldval. 
+++ // Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
+++ ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+++ Assembler::relaxed /* acquire */, Assembler::rl /* release */,
+++ false /* is_cae */, $res$$Register);
+++ %}
+++
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct compareAndExchangeNAcq_shenandoah(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
+++ predicate(needs_acquiring_load_reserved(n));
+++ match(Set res (ShenandoahCompareAndExchangeN mem (Binary oldval newval)));
+++ ins_cost(10 * DEFAULT_COST);
+++
+++ effect(TEMP_DEF res, TEMP tmp, KILL cr);
+++ format %{
+++ "cmpxchgw_acq_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeNAcq_shenandoah"
+++ %}
+++
+++ ins_encode %{
+++ Register tmp = $tmp$$Register;
+++ __ mv(tmp, $oldval$$Register);
+++ ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+++ Assembler::aq /* acquire */, Assembler::rl /* release */,
+++ true /* is_cae */, $res$$Register);
+++ %}
+++
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct compareAndExchangePAcq_shenandoah(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
+++ predicate(needs_acquiring_load_reserved(n));
+++ match(Set res (ShenandoahCompareAndExchangeP mem (Binary oldval newval)));
+++ ins_cost(10 * DEFAULT_COST);
+++
+++ effect(TEMP_DEF res, TEMP tmp, KILL cr);
+++ format %{
+++ "cmpxchg_acq_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangePAcq_shenandoah"
+++ %}
+++
+++ ins_encode %{
+++ Register tmp = $tmp$$Register;
+++ __ mv(tmp, $oldval$$Register);
+++ ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+++ Assembler::aq /* acquire */, Assembler::rl /* release */,
+++ true /* is_cae */, $res$$Register);
+++ %}
+++
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct weakCompareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
+++ match(Set res (ShenandoahWeakCompareAndSwapP mem (Binary oldval newval)));
+++ ins_cost(10 * DEFAULT_COST);
+++
+++ effect(TEMP tmp, KILL cr);
+++ format %{
+++ "cmpxchg_shenandoah $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@weakCompareAndSwapP_shenandoah"
+++ %}
+++
+++ ins_encode %{
+++ Register tmp = $tmp$$Register;
+++ __ mv(tmp, $oldval$$Register); // Must not clobber oldval.
+++ ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+++ Assembler::relaxed /* acquire */, Assembler::rl /* release */,
+++ false /* is_cae */, $res$$Register);
+++ %}
+++
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct weakCompareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
+++ predicate(needs_acquiring_load_reserved(n));
+++ match(Set res (ShenandoahWeakCompareAndSwapN mem (Binary oldval newval)));
+++ ins_cost(10 * DEFAULT_COST);
+++
+++ effect(TEMP tmp, KILL cr);
+++ format %{
+++ "cmpxchgw_acq_shenandoah $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@weakCompareAndSwapNAcq_shenandoah"
+++ "mv $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+++ %}
+++
+++ ins_encode %{
+++ Register tmp = $tmp$$Register;
+++ __ mv(tmp, $oldval$$Register); // Must not clobber oldval.
+++ // Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
+++ ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+++ Assembler::aq /* acquire */, Assembler::rl /* release */,
+++ false /* is_cae */, $res$$Register);
+++ %}
+++
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct weakCompareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
+++ predicate(needs_acquiring_load_reserved(n));
+++ match(Set res (ShenandoahWeakCompareAndSwapP mem (Binary oldval newval)));
+++ ins_cost(10 * DEFAULT_COST);
+++
+++ effect(TEMP tmp, KILL cr);
+++ format %{
+++ "cmpxchg_acq_shenandoah $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@weakCompareAndSwapPAcq_shenandoah"
+++ "mv $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+++ %}
+++
+++ ins_encode %{
+++ Register tmp = $tmp$$Register;
+++ __ mv(tmp, $oldval$$Register); // Must not clobber oldval.
+++ // Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
+++ ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+++ Assembler::aq /* acquire */, Assembler::rl /* release */,
+++ false /* is_cae */, $res$$Register);
+++ %}
+++
+++ ins_pipe(pipe_slow);
+++%}
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp
++@@ -0,0 +1,445 @@
+++/*
+++ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.inline.hpp" +++#include "code/codeBlob.hpp" +++#include "code/vmreg.inline.hpp" +++#include "gc/z/zBarrier.inline.hpp" +++#include "gc/z/zBarrierSet.hpp" +++#include "gc/z/zBarrierSetAssembler.hpp" +++#include "gc/z/zBarrierSetRuntime.hpp" +++#include "gc/z/zThreadLocalData.hpp" +++#include "memory/resourceArea.hpp" +++#include "runtime/sharedRuntime.hpp" +++#include "utilities/macros.hpp" +++#ifdef COMPILER1 +++#include "c1/c1_LIRAssembler.hpp" +++#include "c1/c1_MacroAssembler.hpp" +++#include "gc/z/c1/zBarrierSetC1.hpp" +++#endif // COMPILER1 +++#ifdef COMPILER2 +++#include "gc/z/c2/zBarrierSetC2.hpp" +++#endif // COMPILER2 +++ +++#ifdef PRODUCT +++#define BLOCK_COMMENT(str) /* nothing */ +++#else +++#define BLOCK_COMMENT(str) __ block_comment(str) +++#endif +++ +++#undef __ +++#define __ masm-> +++ +++void ZBarrierSetAssembler::load_at(MacroAssembler* masm, +++ DecoratorSet decorators, +++ BasicType type, +++ Register dst, +++ Address src, +++ Register tmp1, +++ Register tmp_thread) { +++ if (!ZBarrierSet::barrier_needed(decorators, type)) { +++ // Barrier not needed +++ BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); +++ return; +++ } +++ +++ assert_different_registers(t1, src.base()); +++ assert_different_registers(t0, t1, dst); +++ +++ Label done; +++ +++ // Load bad mask into temp register. +++ __ la(t0, src); +++ __ ld(t1, address_bad_mask_from_thread(xthread)); +++ __ ld(dst, Address(t0)); +++ +++ // Test reference against bad mask. If mask bad, then we need to fix it up. +++ __ andr(t1, dst, t1); +++ __ beqz(t1, done); +++ +++ __ enter(); +++ +++ __ push_call_clobbered_registers_except(RegSet::of(dst)); +++ +++ if (c_rarg0 != dst) { +++ __ mv(c_rarg0, dst); +++ } +++ +++ __ mv(c_rarg1, t0); +++ +++ __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); +++ +++ // Make sure dst has the return value. +++ if (dst != x10) { +++ __ mv(dst, x10); +++ } +++ +++ __ pop_call_clobbered_registers_except(RegSet::of(dst)); +++ __ leave(); +++ +++ __ bind(done); +++} +++ +++#ifdef ASSERT +++ +++void ZBarrierSetAssembler::store_at(MacroAssembler* masm, +++ DecoratorSet decorators, +++ BasicType type, +++ Address dst, +++ Register val, +++ Register tmp1, +++ Register tmp2) { +++ // Verify value +++ if (is_reference_type(type)) { +++ // Note that src could be noreg, which means we +++ // are storing null and can skip verification. +++ if (val != noreg) { +++ Label done; +++ +++ // tmp1 and tmp2 are often set to noreg. +++ RegSet savedRegs = RegSet::of(t0); +++ __ push_reg(savedRegs, sp); +++ +++ __ ld(t0, address_bad_mask_from_thread(xthread)); +++ __ andr(t0, val, t0); +++ __ beqz(t0, done); +++ __ stop("Verify oop store failed"); +++ __ should_not_reach_here(); +++ __ bind(done); +++ __ pop_reg(savedRegs, sp); +++ } +++ } +++ +++ // Store value +++ BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2); +++} +++ +++#endif // ASSERT +++ +++void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, +++ DecoratorSet decorators, +++ bool is_oop, +++ Register src, +++ Register dst, +++ Register count, +++ RegSet saved_regs) { +++ if (!is_oop) { +++ // Barrier not needed +++ return; +++ } +++ +++ BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {"); +++ +++ assert_different_registers(src, count, t0); +++ +++ __ push_reg(saved_regs, sp); +++ +++ if (count == c_rarg0 && src == c_rarg1) { +++ // exactly backwards!! 
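+++ // Swap c_rarg0 and c_rarg1 in place with the classic three-XOR trick,
+++ // avoiding the need for a scratch register.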
+++ __ xorr(c_rarg0, c_rarg0, c_rarg1);
+++ __ xorr(c_rarg1, c_rarg0, c_rarg1);
+++ __ xorr(c_rarg0, c_rarg0, c_rarg1);
+++ } else {
+++ __ mv(c_rarg0, src);
+++ __ mv(c_rarg1, count);
+++ }
+++
+++ __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2);
+++
+++ __ pop_reg(saved_regs, sp);
+++
+++ BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue");
+++}
+++
+++void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
+++ Register jni_env,
+++ Register robj,
+++ Register tmp,
+++ Label& slowpath) {
+++ BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {");
+++
+++ assert_different_registers(jni_env, robj, tmp);
+++
+++ // Resolve jobject
+++ BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath);
+++
+++ // Compute the offset of the address bad mask from the jni_environment field
+++ long int bad_mask_relative_offset = (long int) (in_bytes(ZThreadLocalData::address_bad_mask_offset()) -
+++ in_bytes(JavaThread::jni_environment_offset()));
+++
+++ // Load the address bad mask
+++ __ ld(tmp, Address(jni_env, bad_mask_relative_offset));
+++
+++ // Check address bad mask
+++ __ andr(tmp, robj, tmp);
+++ __ bnez(tmp, slowpath);
+++
+++ BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
+++}
+++
+++#ifdef COMPILER2
+++
+++OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
+++ if (!OptoReg::is_reg(opto_reg)) {
+++ return OptoReg::Bad;
+++ }
+++
+++ const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
+++ if (vm_reg->is_FloatRegister()) {
+++ return opto_reg & ~1;
+++ }
+++
+++ return opto_reg;
+++}
+++
+++#undef __
+++#define __ _masm->
+++
+++class ZSaveLiveRegisters {
+++private:
+++ MacroAssembler* const _masm;
+++ RegSet _gp_regs;
+++ FloatRegSet _fp_regs;
+++ VectorRegSet _vp_regs;
+++
+++public:
+++ void initialize(ZLoadBarrierStubC2* stub) {
+++ // Record registers that need to be saved/restored
+++ RegMaskIterator rmi(stub->live());
+++ while (rmi.has_next()) {
+++ const OptoReg::Name opto_reg = rmi.next();
+++ if (OptoReg::is_reg(opto_reg)) {
+++ const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
+++ if (vm_reg->is_Register()) {
+++ _gp_regs += RegSet::of(vm_reg->as_Register());
+++ } else if (vm_reg->is_FloatRegister()) {
+++ _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
+++ } else if (vm_reg->is_VectorRegister()) {
+++ const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~(VectorRegisterImpl::max_slots_per_register - 1));
+++ _vp_regs += VectorRegSet::of(vm_reg_base->as_VectorRegister());
+++ } else {
+++ fatal("Unknown register type");
+++ }
+++ }
+++ }
+++
+++ // Remove C-ABI SOE registers, tmp regs and _ref register that will be updated
+++ _gp_regs -= RegSet::range(x18, x27) + RegSet::of(x2) + RegSet::of(x8, x9) + RegSet::of(x5, stub->ref());
+++ }
+++
+++ ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
+++ _masm(masm),
+++ _gp_regs(),
+++ _fp_regs(),
+++ _vp_regs() {
+++ // Figure out what registers to save/restore
+++ initialize(stub);
+++
+++ // Save registers
+++ __ push_reg(_gp_regs, sp);
+++ __ push_fp(_fp_regs, sp);
+++ __ push_v(_vp_regs, sp);
+++ }
+++
+++ ~ZSaveLiveRegisters() {
+++ // Restore registers
+++ __ pop_v(_vp_regs, sp);
+++ __ pop_fp(_fp_regs, sp);
+++ __ pop_reg(_gp_regs, sp);
+++ }
+++};
+++
+++class ZSetupArguments {
+++private:
+++ MacroAssembler* const _masm;
+++ const Register _ref;
+++ const Address _ref_addr;
+++
+++public:
+++ ZSetupArguments(MacroAssembler* masm,
ZLoadBarrierStubC2* stub) :
+++    _masm(masm),
+++    _ref(stub->ref()),
+++    _ref_addr(stub->ref_addr()) {
+++
+++    // Setup arguments
+++    if (_ref_addr.base() == noreg) {
+++      // No self healing
+++      if (_ref != c_rarg0) {
+++        __ mv(c_rarg0, _ref);
+++      }
+++      __ mv(c_rarg1, zr);
+++    } else {
+++      // Self healing
+++      if (_ref == c_rarg0) {
+++        // _ref is already at correct place
+++        __ la(c_rarg1, _ref_addr);
+++      } else if (_ref != c_rarg1) {
+++        // _ref is in wrong place, but not in c_rarg1, so fix it first
+++        __ la(c_rarg1, _ref_addr);
+++        __ mv(c_rarg0, _ref);
+++      } else if (_ref_addr.base() != c_rarg0) {
+++        assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0");
+++        __ mv(c_rarg0, _ref);
+++        __ la(c_rarg1, _ref_addr);
+++      } else {
+++        assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0");
+++        if (_ref_addr.base() == c_rarg0) {
+++          __ mv(t1, c_rarg1);
+++          __ la(c_rarg1, _ref_addr);
+++          __ mv(c_rarg0, t1);
+++        } else {
+++          ShouldNotReachHere();
+++        }
+++      }
+++    }
+++  }
+++
+++  ~ZSetupArguments() {
+++    // Transfer result
+++    if (_ref != x10) {
+++      __ mv(_ref, x10);
+++    }
+++  }
+++};
+++
+++#undef __
+++#define __ masm->
+++
+++void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
+++  BLOCK_COMMENT("ZLoadBarrierStubC2");
+++
+++  // Stub entry
+++  __ bind(*stub->entry());
+++
+++  {
+++    ZSaveLiveRegisters save_live_registers(masm, stub);
+++    ZSetupArguments setup_arguments(masm, stub);
+++
+++    Address target(stub->slow_path());
+++    __ relocate(target.rspec(), [&] {
+++      int32_t offset;
+++      __ la_patchable(t0, target, offset);
+++      __ jalr(x1, t0, offset);
+++    });
+++  }
+++
+++  // Stub exit
+++  __ j(*stub->continuation());
+++}
+++
+++#undef __
+++
+++#endif // COMPILER2
+++
+++#ifdef COMPILER1
+++#undef __
+++#define __ ce->masm()->
+++
+++void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
+++                                                         LIR_Opr ref) const {
+++  assert_different_registers(xthread, ref->as_register(), t1);
+++  __ ld(t1, address_bad_mask_from_thread(xthread));
+++  __ andr(t1, t1, ref->as_register());
+++}
+++
+++void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
+++                                                         ZLoadBarrierStubC1* stub) const {
+++  // Stub entry
+++  __ bind(*stub->entry());
+++
+++  Register ref = stub->ref()->as_register();
+++  Register ref_addr = noreg;
+++  Register tmp = noreg;
+++
+++  if (stub->tmp()->is_valid()) {
+++    // Load address into tmp register
+++    ce->leal(stub->ref_addr(), stub->tmp());
+++    ref_addr = tmp = stub->tmp()->as_pointer_register();
+++  } else {
+++    // Address already in register
+++    ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
+++  }
+++
+++  assert_different_registers(ref, ref_addr, noreg);
+++
+++  // Save x10 unless it is the result or tmp register
+++  // Set up SP to accommodate parameters and maybe x10.
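+++  // Frame layout sketch for the code below: the two store_parameter()
+++  // slots presumably occupy sp+0..15, so when x10 has to be preserved it
+++  // is parked at sp+16 and the frame grows to 32 bytes instead of 16.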
+++ if (ref != x10 && tmp != x10) { +++ __ sub(sp, sp, 32); +++ __ sd(x10, Address(sp, 16)); +++ } else { +++ __ sub(sp, sp, 16); +++ } +++ +++ // Setup arguments and call runtime stub +++ ce->store_parameter(ref_addr, 1); +++ ce->store_parameter(ref, 0); +++ +++ __ far_call(stub->runtime_stub()); +++ +++ // Verify result +++ __ verify_oop(x10, "Bad oop"); +++ +++ +++ // Move result into place +++ if (ref != x10) { +++ __ mv(ref, x10); +++ } +++ +++ // Restore x10 unless it is the result or tmp register +++ if (ref != x10 && tmp != x10) { +++ __ ld(x10, Address(sp, 16)); +++ __ add(sp, sp, 32); +++ } else { +++ __ add(sp, sp, 16); +++ } +++ +++ // Stub exit +++ __ j(*stub->continuation()); +++} +++ +++#undef __ +++#define __ sasm-> +++ +++void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, +++ DecoratorSet decorators) const { +++ __ prologue("zgc_load_barrier stub", false); +++ +++ __ push_call_clobbered_registers_except(RegSet::of(x10)); +++ +++ // Setup arguments +++ __ load_parameter(0, c_rarg0); +++ __ load_parameter(1, c_rarg1); +++ +++ __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); +++ +++ __ pop_call_clobbered_registers_except(RegSet::of(x10)); +++ +++ __ epilogue(); +++} +++ +++#undef __ +++#endif // COMPILER1 ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.hpp ++@@ -0,0 +1,102 @@ +++/* +++ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#ifndef CPU_RISCV_GC_Z_ZBARRIERSETASSEMBLER_RISCV_HPP +++#define CPU_RISCV_GC_Z_ZBARRIERSETASSEMBLER_RISCV_HPP +++ +++#include "code/vmreg.hpp" +++#include "oops/accessDecorators.hpp" +++#ifdef COMPILER2 +++#include "opto/optoreg.hpp" +++#endif // COMPILER2 +++ +++#ifdef COMPILER1 +++class LIR_Assembler; +++class LIR_OprDesc; +++typedef LIR_OprDesc* LIR_Opr; +++class StubAssembler; +++class ZLoadBarrierStubC1; +++#endif // COMPILER1 +++ +++#ifdef COMPILER2 +++class Node; +++class ZLoadBarrierStubC2; +++#endif // COMPILER2 +++ +++class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { +++public: +++ virtual void load_at(MacroAssembler* masm, +++ DecoratorSet decorators, +++ BasicType type, +++ Register dst, +++ Address src, +++ Register tmp1, +++ Register tmp_thread); +++ +++#ifdef ASSERT +++ virtual void store_at(MacroAssembler* masm, +++ DecoratorSet decorators, +++ BasicType type, +++ Address dst, +++ Register val, +++ Register tmp1, +++ Register tmp2); +++#endif // ASSERT +++ +++ virtual void arraycopy_prologue(MacroAssembler* masm, +++ DecoratorSet decorators, +++ bool is_oop, +++ Register src, +++ Register dst, +++ Register count, +++ RegSet saved_regs); +++ +++ virtual void try_resolve_jobject_in_native(MacroAssembler* masm, +++ Register jni_env, +++ Register robj, +++ Register tmp, +++ Label& slowpath); +++ +++#ifdef COMPILER1 +++ void generate_c1_load_barrier_test(LIR_Assembler* ce, +++ LIR_Opr ref) const; +++ +++ void generate_c1_load_barrier_stub(LIR_Assembler* ce, +++ ZLoadBarrierStubC1* stub) const; +++ +++ void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, +++ DecoratorSet decorators) const; +++#endif // COMPILER1 +++ +++#ifdef COMPILER2 +++ OptoReg::Name refine_register(const Node* node, +++ OptoReg::Name opto_reg); +++ +++ void generate_c2_load_barrier_stub(MacroAssembler* masm, +++ ZLoadBarrierStubC2* stub) const; +++#endif // COMPILER2 +++}; +++ +++#endif // CPU_RISCV_GC_Z_ZBARRIERSETASSEMBLER_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/z/zGlobals_riscv.cpp ++@@ -0,0 +1,212 @@ +++/* +++ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ *
+++ */
+++
+++#include "precompiled.hpp"
+++#include "gc/shared/gcLogPrecious.hpp"
+++#include "gc/shared/gc_globals.hpp"
+++#include "gc/z/zGlobals.hpp"
+++#include "runtime/globals.hpp"
+++#include "runtime/os.hpp"
+++#include "utilities/globalDefinitions.hpp"
+++#include "utilities/powerOfTwo.hpp"
+++
+++#ifdef LINUX
+++#include <sys/mman.h>
+++#endif // LINUX
+++
+++//
+++// The heap can have three different layouts, depending on the max heap size.
+++//
+++// Address Space & Pointer Layout 1
+++// --------------------------------
+++//
+++//  +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
+++//  .                                .
+++//  .                                .
+++//  .                                .
+++//  +--------------------------------+ 0x0000014000000000 (20TB)
+++//  |         Remapped View          |
+++//  +--------------------------------+ 0x0000010000000000 (16TB)
+++//  .                                .
+++//  +--------------------------------+ 0x00000c0000000000 (12TB)
+++//  |         Marked1 View           |
+++//  +--------------------------------+ 0x0000080000000000 (8TB)
+++//  |         Marked0 View           |
+++//  +--------------------------------+ 0x0000040000000000 (4TB)
+++//  .                                .
+++//  +--------------------------------+ 0x0000000000000000
+++//
+++//   6                  4 4  4 4
+++//   3                  6 5  2 1                                             0
+++//  +--------------------+----+-----------------------------------------------+
+++//  |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111|
+++//  +--------------------+----+-----------------------------------------------+
+++//  |                    |    |
+++//  |                    |    * 41-0 Object Offset (42-bits, 4TB address space)
+++//  |                    |
+++//  |                    * 45-42 Metadata Bits (4-bits)  0001 = Marked0      (Address view 4-8TB)
+++//  |                                                    0010 = Marked1      (Address view 8-12TB)
+++//  |                                                    0100 = Remapped     (Address view 16-20TB)
+++//  |                                                    1000 = Finalizable  (Address view N/A)
+++//  |
+++//  * 63-46 Fixed (18-bits, always zero)
+++//
+++//
+++// Address Space & Pointer Layout 2
+++// --------------------------------
+++//
+++//  +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
+++//  .                                .
+++//  .                                .
+++//  .                                .
+++//  +--------------------------------+ 0x0000280000000000 (40TB)
+++//  |         Remapped View          |
+++//  +--------------------------------+ 0x0000200000000000 (32TB)
+++//  .                                .
+++//  +--------------------------------+ 0x0000180000000000 (24TB)
+++//  |         Marked1 View           |
+++//  +--------------------------------+ 0x0000100000000000 (16TB)
+++//  |         Marked0 View           |
+++//  +--------------------------------+ 0x0000080000000000 (8TB)
+++//  .                                .
+++//  +--------------------------------+ 0x0000000000000000
+++//
+++//   6                 4 4  4 4
+++//   3                 7 6  3 2                                              0
+++//  +------------------+-----+------------------------------------------------+
+++//  |00000000 00000000 0|1111|111 11111111 11111111 11111111 11111111 11111111|
+++//  +-------------------+----+------------------------------------------------+
+++//  |                   |    |
+++//  |                   |    * 42-0 Object Offset (43-bits, 8TB address space)
+++//  |                   |
+++//  |                   * 46-43 Metadata Bits (4-bits)  0001 = Marked0      (Address view 8-16TB)
+++//  |                                                   0010 = Marked1      (Address view 16-24TB)
+++//  |                                                   0100 = Remapped     (Address view 32-40TB)
+++//  |                                                   1000 = Finalizable  (Address view N/A)
+++//  |
+++//  * 63-47 Fixed (17-bits, always zero)
+++//
+++//
+++// Address Space & Pointer Layout 3
+++// --------------------------------
+++//
+++//  +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
+++//  .                                .
+++//  .                                .
+++//  .                                .
+++//  +--------------------------------+ 0x0000500000000000 (80TB)
+++//  |         Remapped View          |
+++//  +--------------------------------+ 0x0000400000000000 (64TB)
+++//  .                                .
+++//  +--------------------------------+ 0x0000300000000000 (48TB)
+++//  |         Marked1 View           |
+++//  +--------------------------------+ 0x0000200000000000 (32TB)
+++//  |         Marked0 View           |
+++//  +--------------------------------+ 0x0000100000000000 (16TB)
+++//  .                                .
+++//  +--------------------------------+ 0x0000000000000000
+++//
+++//   6                4 4  4 4
+++//   3                8 7  4 3                                               0
+++//  +------------------+----+-------------------------------------------------+
+++//  |00000000 00000000 |1111|1111 11111111 11111111 11111111 11111111 11111111|
+++//  +------------------+----+-------------------------------------------------+
+++//  |                  |    |
+++//  |                  |    * 43-0 Object Offset (44-bits, 16TB address space)
+++//  |                  |
+++//  |                  * 47-44 Metadata Bits (4-bits)  0001 = Marked0      (Address view 16-32TB)
+++//  |                                                  0010 = Marked1      (Address view 32-48TB)
+++//  |                                                  0100 = Remapped     (Address view 64-80TB)
+++//  |                                                  1000 = Finalizable  (Address view N/A)
+++//  |
+++//  * 63-48 Fixed (16-bits, always zero)
+++//
+++
+++// Default value if probing is not implemented for a certain platform: 128TB
+++static const size_t DEFAULT_MAX_ADDRESS_BIT = 47;
+++// Minimum value returned, if probing fails: 64GB
+++static const size_t MINIMUM_MAX_ADDRESS_BIT = 36;
+++
+++static size_t probe_valid_max_address_bit() {
+++#ifdef LINUX
+++  size_t max_address_bit = 0;
+++  const size_t page_size = os::vm_page_size();
+++  for (size_t i = DEFAULT_MAX_ADDRESS_BIT; i > MINIMUM_MAX_ADDRESS_BIT; --i) {
+++    const uintptr_t base_addr = ((uintptr_t) 1U) << i;
+++    if (msync((void*)base_addr, page_size, MS_ASYNC) == 0) {
+++      // msync succeeded, the address is valid, and maybe even already mapped.
+++      max_address_bit = i;
+++      break;
+++    }
+++    if (errno != ENOMEM) {
+++      // Some error occurred. This should never happen, but msync
+++      // has some undefined behavior, hence ignore this bit.
+++#ifdef ASSERT
+++      fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
+++#else // ASSERT
+++      log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
+++#endif // ASSERT
+++      continue;
+++    }
+++    // Since msync failed with ENOMEM, the page might not be mapped.
+++    // Try to map it, to see if the address is valid.
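+++    // Note that without MAP_FIXED the kernel is free to place the mapping
+++    // at a different address, which is why the check below only accepts a
+++    // result that landed exactly on the hinted base_addr.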
+++ void* const result_addr = mmap((void*) base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); +++ if (result_addr != MAP_FAILED) { +++ munmap(result_addr, page_size); +++ } +++ if ((uintptr_t) result_addr == base_addr) { +++ // address is valid +++ max_address_bit = i; +++ break; +++ } +++ } +++ if (max_address_bit == 0) { +++ // probing failed, allocate a very high page and take that bit as the maximum +++ const uintptr_t high_addr = ((uintptr_t) 1U) << DEFAULT_MAX_ADDRESS_BIT; +++ void* const result_addr = mmap((void*) high_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); +++ if (result_addr != MAP_FAILED) { +++ max_address_bit = BitsPerSize_t - count_leading_zeros((size_t) result_addr) - 1; +++ munmap(result_addr, page_size); +++ } +++ } +++ log_info_p(gc, init)("Probing address space for the highest valid bit: " SIZE_FORMAT, max_address_bit); +++ return MAX2(max_address_bit, MINIMUM_MAX_ADDRESS_BIT); +++#else // LINUX +++ return DEFAULT_MAX_ADDRESS_BIT; +++#endif // LINUX +++} +++ +++size_t ZPlatformAddressOffsetBits() { +++ const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1; +++ const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; +++ const size_t min_address_offset_bits = max_address_offset_bits - 2; +++ const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio); +++ const size_t address_offset_bits = log2i_exact(address_offset); +++ return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); +++} +++ +++size_t ZPlatformAddressMetadataShift() { +++ return ZPlatformAddressOffsetBits(); +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/z/zGlobals_riscv.hpp ++@@ -0,0 +1,36 @@ +++/* +++ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_GC_Z_ZGLOBALS_RISCV_HPP +++#define CPU_RISCV_GC_Z_ZGLOBALS_RISCV_HPP +++ +++const size_t ZPlatformGranuleSizeShift = 21; // 2MB +++const size_t ZPlatformHeapViews = 3; +++const size_t ZPlatformCacheLineSize = 64; +++ +++size_t ZPlatformAddressOffsetBits(); +++size_t ZPlatformAddressMetadataShift(); +++ +++#endif // CPU_RISCV_GC_Z_ZGLOBALS_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/gc/z/z_riscv64.ad ++@@ -0,0 +1,233 @@ +++// +++// Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. 
+++// Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++// +++// This code is free software; you can redistribute it and/or modify it +++// under the terms of the GNU General Public License version 2 only, as +++// published by the Free Software Foundation. +++// +++// This code is distributed in the hope that it will be useful, but WITHOUT +++// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++// version 2 for more details (a copy is included in the LICENSE file that +++// accompanied this code). +++// +++// You should have received a copy of the GNU General Public License version +++// 2 along with this work; if not, write to the Free Software Foundation, +++// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++// +++// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++// or visit www.oracle.com if you need additional information or have any +++// questions. +++// +++ +++source_hpp %{ +++ +++#include "gc/shared/gc_globals.hpp" +++#include "gc/z/c2/zBarrierSetC2.hpp" +++#include "gc/z/zThreadLocalData.hpp" +++ +++%} +++ +++source %{ +++ +++static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, int barrier_data) { +++ if (barrier_data == ZLoadBarrierElided) { +++ return; +++ } +++ ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data); +++ __ ld(tmp, Address(xthread, ZThreadLocalData::address_bad_mask_offset())); +++ __ andr(tmp, tmp, ref); +++ __ bnez(tmp, *stub->entry(), true /* far */); +++ __ bind(*stub->continuation()); +++} +++ +++static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) { +++ ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, ZLoadBarrierStrong); +++ __ j(*stub->entry()); +++ __ bind(*stub->continuation()); +++} +++ +++%} +++ +++// Load Pointer +++instruct zLoadP(iRegPNoSp dst, memory mem) +++%{ +++ match(Set dst (LoadP mem)); +++ predicate(UseZGC && (n->as_Load()->barrier_data() != 0)); +++ effect(TEMP dst); +++ +++ ins_cost(4 * DEFAULT_COST); +++ +++ format %{ "ld $dst, $mem, #@zLoadP" %} +++ +++ ins_encode %{ +++ const Address ref_addr (as_Register($mem$$base), $mem$$disp); +++ __ ld($dst$$Register, ref_addr); +++ z_load_barrier(_masm, this, ref_addr, $dst$$Register, t0 /* tmp */, barrier_data()); +++ %} +++ +++ ins_pipe(iload_reg_mem); +++%} +++ +++instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ +++ match(Set res (CompareAndSwapP mem (Binary oldval newval))); +++ match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); +++ predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); +++ effect(KILL cr, TEMP_DEF res); +++ +++ ins_cost(2 * VOLATILE_REF_COST); +++ +++ format %{ "cmpxchg $mem, $oldval, $newval, #@zCompareAndSwapP\n\t" +++ "mv $res, $res == $oldval" %} +++ +++ ins_encode %{ +++ Label failed; +++ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); +++ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, +++ Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register, +++ true /* result_as_bool */); +++ __ beqz($res$$Register, 
failed); +++ __ mv(t0, $oldval$$Register); +++ __ bind(failed); +++ if (barrier_data() != ZLoadBarrierElided) { +++ Label good; +++ __ ld(t1, Address(xthread, ZThreadLocalData::address_bad_mask_offset()), t1 /* tmp */); +++ __ andr(t1, t1, t0); +++ __ beqz(t1, good); +++ z_load_barrier_slow_path(_masm, this, Address($mem$$Register), t0 /* ref */, t1 /* tmp */); +++ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, +++ Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register, +++ true /* result_as_bool */); +++ __ bind(good); +++ } +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ +++ match(Set res (CompareAndSwapP mem (Binary oldval newval))); +++ match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); +++ predicate(UseZGC && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)); +++ effect(KILL cr, TEMP_DEF res); +++ +++ ins_cost(2 * VOLATILE_REF_COST); +++ +++ format %{ "cmpxchg $mem, $oldval, $newval, #@zCompareAndSwapPAcq\n\t" +++ "mv $res, $res == $oldval" %} +++ +++ ins_encode %{ +++ Label failed; +++ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); +++ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, +++ Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register, +++ true /* result_as_bool */); +++ __ beqz($res$$Register, failed); +++ __ mv(t0, $oldval$$Register); +++ __ bind(failed); +++ if (barrier_data() != ZLoadBarrierElided) { +++ Label good; +++ __ ld(t1, Address(xthread, ZThreadLocalData::address_bad_mask_offset()), t1 /* tmp */); +++ __ andr(t1, t1, t0); +++ __ beqz(t1, good); +++ z_load_barrier_slow_path(_masm, this, Address($mem$$Register), t0 /* ref */, t1 /* tmp */); +++ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, +++ Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register, +++ true /* result_as_bool */); +++ __ bind(good); +++ } +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) %{ +++ match(Set res (CompareAndExchangeP mem (Binary oldval newval))); +++ predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); +++ effect(TEMP_DEF res); +++ +++ ins_cost(2 * VOLATILE_REF_COST); +++ +++ format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangeP" %} +++ +++ ins_encode %{ +++ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); +++ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, +++ Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register); +++ if (barrier_data() != ZLoadBarrierElided) { +++ Label good; +++ __ ld(t0, Address(xthread, ZThreadLocalData::address_bad_mask_offset())); +++ __ andr(t0, t0, $res$$Register); +++ __ beqz(t0, good); +++ z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, t0 /* tmp */); +++ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, +++ Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register); +++ __ bind(good); +++ } +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) %{ +++ match(Set res (CompareAndExchangeP mem 
(Binary oldval newval))); +++ predicate(UseZGC && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); +++ effect(TEMP_DEF res); +++ +++ ins_cost(2 * VOLATILE_REF_COST); +++ +++ format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangePAcq" %} +++ +++ ins_encode %{ +++ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); +++ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, +++ Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register); +++ if (barrier_data() != ZLoadBarrierElided) { +++ Label good; +++ __ ld(t0, Address(xthread, ZThreadLocalData::address_bad_mask_offset())); +++ __ andr(t0, t0, $res$$Register); +++ __ beqz(t0, good); +++ z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, t0 /* tmp */); +++ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, +++ Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register); +++ __ bind(good); +++ } +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ +++ match(Set prev (GetAndSetP mem newv)); +++ predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); +++ effect(TEMP_DEF prev, KILL cr); +++ +++ ins_cost(2 * VOLATILE_REF_COST); +++ +++ format %{ "atomic_xchg $prev, $newv, [$mem], #@zGetAndSetP" %} +++ +++ ins_encode %{ +++ __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base)); +++ z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, t0 /* tmp */, barrier_data()); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ +++ match(Set prev (GetAndSetP mem newv)); +++ predicate(UseZGC && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() != 0)); +++ effect(TEMP_DEF prev, KILL cr); +++ +++ ins_cost(VOLATILE_REF_COST); +++ +++ format %{ "atomic_xchg_acq $prev, $newv, [$mem], #@zGetAndSetPAcq" %} +++ +++ ins_encode %{ +++ __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base)); +++ z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, t0 /* tmp */, barrier_data()); +++ %} +++ ins_pipe(pipe_serial); +++%} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/globalDefinitions_riscv.hpp ++@@ -0,0 +1,52 @@ +++/* +++ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef CPU_RISCV_GLOBALDEFINITIONS_RISCV_HPP
+++#define CPU_RISCV_GLOBALDEFINITIONS_RISCV_HPP
+++
+++const int StackAlignmentInBytes = 16;
+++
+++// Indicates whether the C calling conventions require that
+++// 32-bit integer argument values are extended to 64 bits.
+++const bool CCallingConventionRequiresIntsAsLongs = false;
+++
+++// RISCV has adopted a multicopy atomic model closely following
+++// that of ARMv8.
+++#define CPU_MULTI_COPY_ATOMIC
+++
+++// To be safe, we deoptimize when we come across an access that needs
+++// patching. This is similar to what is done on aarch64.
+++#define DEOPTIMIZE_WHEN_PATCHING
+++
+++#define SUPPORTS_NATIVE_CX8
+++
+++#define SUPPORT_RESERVED_STACK_AREA
+++
+++#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false
+++
+++#define USE_POINTERS_TO_REGISTER_IMPL_ARRAY
+++
+++#endif // CPU_RISCV_GLOBALDEFINITIONS_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/globals_riscv.hpp
++@@ -0,0 +1,101 @@
+++/*
+++ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef CPU_RISCV_GLOBALS_RISCV_HPP
+++#define CPU_RISCV_GLOBALS_RISCV_HPP
+++
+++#include "utilities/globalDefinitions.hpp"
+++#include "utilities/macros.hpp"
+++
+++// Sets the default values for platform dependent flags used by the runtime system.
+++// (see globals.hpp)
+++
+++define_pd_global(bool, ImplicitNullChecks,    true);  // Generate code for implicit null checks
+++define_pd_global(bool, TrapBasedNullChecks,   false);
+++define_pd_global(bool, UncommonNullCast,      true);  // Uncommon-trap NULLs passed to check cast
+++
+++define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
+++define_pd_global(intx, CodeEntryAlignment,    64);
+++define_pd_global(intx, OptoLoopAlignment,     16);
+++define_pd_global(intx, InlineFrequencyCount,  100);
+++
+++#define DEFAULT_STACK_YELLOW_PAGES   (2)
+++#define DEFAULT_STACK_RED_PAGES      (1)
+++// Java_java_net_SocketOutputStream_socketWrite0() uses a 64k buffer on the
+++// stack if compiled for unix and LP64. To pass stack overflow tests we need
+++// 20 shadow pages.
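+++// (The DEBUG_ONLY(+5) below is presumably extra head-room for the larger
+++// frames produced by assert-enabled debug builds.)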
+++#define DEFAULT_STACK_SHADOW_PAGES (20 DEBUG_ONLY(+5)) +++#define DEFAULT_STACK_RESERVED_PAGES (1) +++ +++#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES +++#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES +++#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES +++#define MIN_STACK_RESERVED_PAGES (0) +++ +++define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES); +++define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES); +++define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES); +++define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES); +++ +++define_pd_global(bool, RewriteBytecodes, true); +++define_pd_global(bool, RewriteFrequentPairs, true); +++ +++define_pd_global(bool, PreserveFramePointer, false); +++ +++define_pd_global(uintx, TypeProfileLevel, 111); +++ +++define_pd_global(bool, CompactStrings, true); +++ +++// Clear short arrays bigger than one word in an arch-specific way +++define_pd_global(intx, InitArrayShortSize, BytesPerLong); +++ +++define_pd_global(intx, InlineSmallCode, 1000); +++ +++#define ARCH_FLAGS(develop, \ +++ product, \ +++ notproduct, \ +++ range, \ +++ constraint) \ +++ \ +++ product(bool, NearCpool, true, \ +++ "constant pool is close to instructions") \ +++ product(intx, BlockZeroingLowLimit, 256, \ +++ "Minimum size in bytes when block zeroing will be used") \ +++ range(1, max_jint) \ +++ product(bool, TraceTraps, false, "Trace all traps the signal handler") \ +++ /* For now we're going to be safe and add the I/O bits to userspace fences. */ \ +++ product(bool, UseConservativeFence, true, \ +++ "Extend i for r and o for w in the pred/succ flags of fence") \ +++ product(bool, AvoidUnalignedAccesses, true, \ +++ "Avoid generating unaligned memory accesses") \ +++ product(bool, UseRVC, true, "Use RVC instructions") \ +++ product(bool, UseRVV, false, EXPERIMENTAL, "Use RVV instructions") \ +++ product(bool, UseZba, false, EXPERIMENTAL, "Use Zba instructions") \ +++ product(bool, UseZbb, false, EXPERIMENTAL, "Use Zbb instructions") \ +++ product(bool, UseZbs, false, EXPERIMENTAL, "Use Zbs instructions") \ +++ product(bool, UseRVVForBigIntegerShiftIntrinsics, true, \ +++ "Use RVV instructions for left/right shift of BigInteger") +++ +++#endif // CPU_RISCV_GLOBALS_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/icBuffer_riscv.cpp ++@@ -0,0 +1,79 @@ +++/* +++ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.hpp" +++#include "asm/macroAssembler.inline.hpp" +++#include "code/icBuffer.hpp" +++#include "gc/shared/collectedHeap.inline.hpp" +++#include "interpreter/bytecodes.hpp" +++#include "memory/resourceArea.hpp" +++#include "nativeInst_riscv.hpp" +++#include "oops/oop.inline.hpp" +++ +++int InlineCacheBuffer::ic_stub_code_size() { +++ // 6: auipc + ld + auipc + jalr + address(2 * instruction_size) +++ // 5: auipc + ld + j + address(2 * instruction_size) +++ return (MacroAssembler::far_branches() ? 6 : 5) * NativeInstruction::instruction_size; +++} +++ +++#define __ masm-> +++ +++void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) { +++ assert_cond(code_begin != NULL && entry_point != NULL); +++ ResourceMark rm; +++ CodeBuffer code(code_begin, ic_stub_code_size()); +++ MacroAssembler* masm = new MacroAssembler(&code); +++ // Note: even though the code contains an embedded value, we do not need reloc info +++ // because +++ // (1) the value is old (i.e., doesn't matter for scavenges) +++ // (2) these ICStubs are removed *before* a GC happens, so the roots disappear +++ +++ address start = __ pc(); +++ Label l; +++ __ ld(t1, l); +++ __ far_jump(ExternalAddress(entry_point)); +++ __ align(wordSize); +++ __ bind(l); +++ __ emit_int64((intptr_t)cached_value); +++ // Only need to invalidate the 1st two instructions - not the whole ic stub +++ ICache::invalidate_range(code_begin, InlineCacheBuffer::ic_stub_code_size()); +++ assert(__ pc() - start == ic_stub_code_size(), "must be"); +++} +++ +++address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) { +++ NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object +++ NativeJump* jump = nativeJump_at(move->next_instruction_address()); +++ return jump->jump_destination(); +++} +++ +++ +++void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) { +++ // The word containing the cached value is at the end of this IC buffer +++ uintptr_t *p = (uintptr_t *)(code_begin + ic_stub_code_size() - wordSize); +++ void* o = (void*)*p; +++ return o; +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/icache_riscv.cpp ++@@ -0,0 +1,59 @@ +++/* +++ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#include "precompiled.hpp"
+++#include "asm/macroAssembler.hpp"
+++#include "runtime/icache.hpp"
+++
+++#define __ _masm->
+++
+++static int icache_flush(address addr, int lines, int magic) {
+++  // To make a store to instruction memory visible to all RISC-V harts,
+++  // the writing hart has to execute a data FENCE before requesting that
+++  // all remote RISC-V harts execute a FENCE.I.
+++  //
+++  // No such assurance is defined at the interface level of the builtin
+++  // method, and so we should make sure it works.
+++  __asm__ volatile("fence rw, rw" : : : "memory");
+++
+++  __builtin___clear_cache(addr, addr + (lines << ICache::log2_line_size));
+++  return magic;
+++}
+++
+++void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub) {
+++  address start = (address)icache_flush;
+++  *flush_icache_stub = (ICache::flush_icache_stub_t)start;
+++
+++  // ICache::invalidate_range() contains explicit condition that the first
+++  // call is invoked on the generated icache flush stub code range.
+++  ICache::invalidate_range(start, 0);
+++
+++  {
+++    StubCodeMark mark(this, "ICache", "fake_stub_for_inlined_icache_flush");
+++    __ ret();
+++  }
+++}
+++
+++#undef __
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/icache_riscv.hpp
++@@ -0,0 +1,42 @@
+++/*
+++ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef CPU_RISCV_ICACHE_RISCV_HPP
+++#define CPU_RISCV_ICACHE_RISCV_HPP
+++
+++// Interface for updating the instruction cache. Whenever the VM
+++// modifies code, part of the processor instruction cache potentially
+++// has to be flushed.
+++
+++class ICache : public AbstractICache {
+++public:
+++  enum {
+++    stub_size = 16,                  // Size of the icache flush stub in bytes
+++    line_size = BytesPerWord,        // conservative
+++    log2_line_size = LogBytesPerWord // log2(line_size)
+++  };
+++};
+++
+++#endif // CPU_RISCV_ICACHE_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
++@@ -0,0 +1,1950 @@
+++/*
+++ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd.
All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.inline.hpp" +++#include "gc/shared/barrierSet.hpp" +++#include "gc/shared/barrierSetAssembler.hpp" +++#include "interp_masm_riscv.hpp" +++#include "interpreter/interpreter.hpp" +++#include "interpreter/interpreterRuntime.hpp" +++#include "logging/log.hpp" +++#include "oops/arrayOop.hpp" +++#include "oops/markWord.hpp" +++#include "oops/method.hpp" +++#include "oops/methodData.hpp" +++#include "prims/jvmtiExport.hpp" +++#include "prims/jvmtiThreadState.hpp" +++#include "runtime/basicLock.hpp" +++#include "runtime/frame.inline.hpp" +++#include "runtime/safepointMechanism.hpp" +++#include "runtime/sharedRuntime.hpp" +++#include "runtime/thread.inline.hpp" +++#include "utilities/powerOfTwo.hpp" +++ +++void InterpreterMacroAssembler::narrow(Register result) { +++ // Get method->_constMethod->_result_type +++ ld(t0, Address(fp, frame::interpreter_frame_method_offset * wordSize)); +++ ld(t0, Address(t0, Method::const_offset())); +++ lbu(t0, Address(t0, ConstMethod::result_type_offset())); +++ +++ Label done, notBool, notByte, notChar; +++ +++ // common case first +++ mv(t1, T_INT); +++ beq(t0, t1, done); +++ +++ // mask integer result to narrower return type. +++ mv(t1, T_BOOLEAN); +++ bne(t0, t1, notBool); +++ +++ andi(result, result, 0x1); +++ j(done); +++ +++ bind(notBool); +++ mv(t1, T_BYTE); +++ bne(t0, t1, notByte); +++ sign_extend(result, result, 8); +++ j(done); +++ +++ bind(notByte); +++ mv(t1, T_CHAR); +++ bne(t0, t1, notChar); +++ zero_extend(result, result, 16); +++ j(done); +++ +++ bind(notChar); +++ sign_extend(result, result, 16); +++ +++ bind(done); +++ sign_extend(result, result, 32); +++} +++ +++void InterpreterMacroAssembler::jump_to_entry(address entry) { +++ assert(entry != NULL, "Entry must have been generated by now"); +++ j(entry); +++} +++ +++void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) { +++ if (JvmtiExport::can_pop_frame()) { +++ Label L; +++ // Initiate popframe handling only if it is not already being +++ // processed. If the flag has the popframe_processing bit set, +++ // it means that this code is called *during* popframe handling - we +++ // don't want to reenter. +++ // This method is only called just after the call into the vm in +++ // call_VM_base, so the arg registers are available. 
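+++    // The two tests below implement that: proceed only when the pending
+++    // bit is set, and bail out if the processing bit is already set.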
+++ lwu(t1, Address(xthread, JavaThread::popframe_condition_offset())); +++ test_bit(t0, t1, exact_log2(JavaThread::popframe_pending_bit)); +++ beqz(t0, L); +++ test_bit(t0, t1, exact_log2(JavaThread::popframe_processing_bit)); +++ bnez(t0, L); +++ // Call Interpreter::remove_activation_preserving_args_entry() to get the +++ // address of the same-named entrypoint in the generated interpreter code. +++ call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry)); +++ jr(x10); +++ bind(L); +++ } +++} +++ +++ +++void InterpreterMacroAssembler::load_earlyret_value(TosState state) { +++ ld(x12, Address(xthread, JavaThread::jvmti_thread_state_offset())); +++ const Address tos_addr(x12, JvmtiThreadState::earlyret_tos_offset()); +++ const Address oop_addr(x12, JvmtiThreadState::earlyret_oop_offset()); +++ const Address val_addr(x12, JvmtiThreadState::earlyret_value_offset()); +++ switch (state) { +++ case atos: +++ ld(x10, oop_addr); +++ sd(zr, oop_addr); +++ verify_oop(x10); +++ break; +++ case ltos: +++ ld(x10, val_addr); +++ break; +++ case btos: // fall through +++ case ztos: // fall through +++ case ctos: // fall through +++ case stos: // fall through +++ case itos: +++ lwu(x10, val_addr); +++ break; +++ case ftos: +++ flw(f10, val_addr); +++ break; +++ case dtos: +++ fld(f10, val_addr); +++ break; +++ case vtos: +++ /* nothing to do */ +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ // Clean up tos value in the thread object +++ mv(t0, (int)ilgl); +++ sw(t0, tos_addr); +++ sw(zr, val_addr); +++} +++ +++ +++void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) { +++ if (JvmtiExport::can_force_early_return()) { +++ Label L; +++ ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset())); +++ beqz(t0, L); // if [thread->jvmti_thread_state() == NULL] then exit +++ +++ // Initiate earlyret handling only if it is not already being processed. +++ // If the flag has the earlyret_processing bit set, it means that this code +++ // is called *during* earlyret handling - we don't want to reenter. +++ lwu(t0, Address(t0, JvmtiThreadState::earlyret_state_offset())); +++ mv(t1, JvmtiThreadState::earlyret_pending); +++ bne(t0, t1, L); +++ +++ // Call Interpreter::remove_activation_early_entry() to get the address of the +++ // same-named entrypoint in the generated interpreter code. 
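+++    // The pending earlyret TOS state is reloaded and passed as the argument
+++    // so that the entry matching the result type is returned (in x10, which
+++    // the jr below then jumps through).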
+++ ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset())); +++ lwu(t0, Address(t0, JvmtiThreadState::earlyret_tos_offset())); +++ call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), t0); +++ jr(x10); +++ bind(L); +++ } +++} +++ +++void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) { +++ assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode"); +++ lhu(reg, Address(xbcp, bcp_offset)); +++ revb_h(reg, reg); +++} +++ +++void InterpreterMacroAssembler::get_dispatch() { +++ ExternalAddress target((address)Interpreter::dispatch_table()); +++ relocate(target.rspec(), [&] { +++ int32_t offset; +++ la_patchable(xdispatch, target, offset); +++ addi(xdispatch, xdispatch, offset); +++ }); +++} +++ +++void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index, +++ int bcp_offset, +++ size_t index_size) { +++ assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); +++ if (index_size == sizeof(u2)) { +++ load_unsigned_short(index, Address(xbcp, bcp_offset)); +++ } else if (index_size == sizeof(u4)) { +++ lwu(index, Address(xbcp, bcp_offset)); +++ // Check if the secondary index definition is still ~x, otherwise +++ // we have to change the following assembler code to calculate the +++ // plain index. +++ assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line"); +++ xori(index, index, -1); +++ sign_extend(index, index, 32); +++ } else if (index_size == sizeof(u1)) { +++ load_unsigned_byte(index, Address(xbcp, bcp_offset)); +++ } else { +++ ShouldNotReachHere(); +++ } +++} +++ +++// Return +++// Rindex: index into constant pool +++// Rcache: address of cache entry - ConstantPoolCache::base_offset() +++// +++// A caller must add ConstantPoolCache::base_offset() to Rcache to get +++// the true address of the cache entry. +++// +++void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, +++ Register index, +++ int bcp_offset, +++ size_t index_size) { +++ assert_different_registers(cache, index); +++ assert_different_registers(cache, xcpool); +++ get_cache_index_at_bcp(index, bcp_offset, index_size); +++ assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); +++ // Convert from field index to ConstantPoolCacheEntry +++ // riscv already has the cache in xcpool so there is no need to +++ // install it in cache. Instead we pre-add the indexed offset to +++ // xcpool and return it in cache. All clients of this method need to +++ // be modified accordingly. +++ shadd(cache, index, xcpool, cache, 5); +++} +++ +++ +++void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache, +++ Register index, +++ Register bytecode, +++ int byte_no, +++ int bcp_offset, +++ size_t index_size) { +++ get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size); +++ // We use a 32-bit load here since the layout of 64-bit words on +++ // little-endian machines allow us that. +++ // n.b. 
unlike x86 cache already includes the index offset
+++  la(bytecode, Address(cache,
+++                       ConstantPoolCache::base_offset() +
+++                       ConstantPoolCacheEntry::indices_offset()));
+++  membar(MacroAssembler::AnyAny);
+++  lwu(bytecode, bytecode);
+++  membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
+++  const int shift_count = (1 + byte_no) * BitsPerByte;
+++  slli(bytecode, bytecode, XLEN - (shift_count + BitsPerByte));
+++  srli(bytecode, bytecode, XLEN - BitsPerByte);
+++}
+++
+++void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
+++                                                               Register tmp,
+++                                                               int bcp_offset,
+++                                                               size_t index_size) {
+++  assert_different_registers(cache, tmp);
+++  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
+++  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
+++  // Convert from field index to ConstantPoolCacheEntry index
+++  // and from word offset to byte offset
+++  assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord,
+++         "else change next line");
+++  ld(cache, Address(fp, frame::interpreter_frame_cache_offset * wordSize));
+++  // skip past the header
+++  add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
+++  // construct pointer to cache entry
+++  shadd(cache, tmp, cache, tmp, 2 + LogBytesPerWord);
+++}
+++
+++// Load object from cpool->resolved_references(index)
+++void InterpreterMacroAssembler::load_resolved_reference_at_index(
+++                                Register result, Register index, Register tmp) {
+++  assert_different_registers(result, index);
+++
+++  get_constant_pool(result);
+++  // Load pointer for resolved_references[] objArray
+++  ld(result, Address(result, ConstantPool::cache_offset_in_bytes()));
+++  ld(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
+++  resolve_oop_handle(result, tmp);
+++  // Add in the index
+++  addi(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
+++  shadd(result, index, result, index, LogBytesPerHeapOop);
+++  load_heap_oop(result, Address(result, 0));
+++}
+++
+++void InterpreterMacroAssembler::load_resolved_klass_at_offset(
+++                                Register cpool, Register index, Register klass, Register temp) {
+++  shadd(temp, index, cpool, temp, LogBytesPerWord);
+++  lhu(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
+++  ld(klass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses
+++  shadd(klass, temp, klass, temp, LogBytesPerWord);
+++  ld(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
+++}
+++
+++void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
+++                                                              Register method,
+++                                                              Register cache) {
+++  const int method_offset = in_bytes(
+++    ConstantPoolCache::base_offset() +
+++      ((byte_no == TemplateTable::f2_byte)
+++       ? ConstantPoolCacheEntry::f2_offset()
+++       : ConstantPoolCacheEntry::f1_offset()));
+++
+++  ld(method, Address(cache, method_offset)); // get f1 Method*
+++}
+++
+++// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
+++// subtype of super_klass.
+++//
+++// Args:
+++//      x10: superklass
+++//      Rsub_klass: subklass
+++//
+++// Kills:
+++//      x12, x15
+++void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
+++                                                  Label& ok_is_subtype) {
+++  assert(Rsub_klass != x10, "x10 holds superklass");
+++  assert(Rsub_klass != x12, "x12 holds 2ndary super array length");
+++  assert(Rsub_klass != x15, "x15 holds 2ndary super array scan ptr");
+++
+++  // Profile the not-null value's klass.
+++ profile_typecheck(x12, Rsub_klass, x15); // blows x12, reloads x15 +++ +++ // Do the check. +++ check_klass_subtype(Rsub_klass, x10, x12, ok_is_subtype); // blows x12 +++ +++ // Profile the failure of the check. +++ profile_typecheck_failed(x12); // blows x12 +++} +++ +++// Java Expression Stack +++ +++void InterpreterMacroAssembler::pop_ptr(Register r) { +++ ld(r, Address(esp, 0)); +++ addi(esp, esp, wordSize); +++} +++ +++void InterpreterMacroAssembler::pop_i(Register r) { +++ lw(r, Address(esp, 0)); // lw do signed extended +++ addi(esp, esp, wordSize); +++} +++ +++void InterpreterMacroAssembler::pop_l(Register r) { +++ ld(r, Address(esp, 0)); +++ addi(esp, esp, 2 * Interpreter::stackElementSize); +++} +++ +++void InterpreterMacroAssembler::push_ptr(Register r) { +++ addi(esp, esp, -wordSize); +++ sd(r, Address(esp, 0)); +++} +++ +++void InterpreterMacroAssembler::push_i(Register r) { +++ addi(esp, esp, -wordSize); +++ sign_extend(r, r, 32); +++ sd(r, Address(esp, 0)); +++} +++ +++void InterpreterMacroAssembler::push_l(Register r) { +++ addi(esp, esp, -2 * wordSize); +++ sd(zr, Address(esp, wordSize)); +++ sd(r, Address(esp)); +++} +++ +++void InterpreterMacroAssembler::pop_f(FloatRegister r) { +++ flw(r, Address(esp, 0)); +++ addi(esp, esp, wordSize); +++} +++ +++void InterpreterMacroAssembler::pop_d(FloatRegister r) { +++ fld(r, Address(esp, 0)); +++ addi(esp, esp, 2 * Interpreter::stackElementSize); +++} +++ +++void InterpreterMacroAssembler::push_f(FloatRegister r) { +++ addi(esp, esp, -wordSize); +++ fsw(r, Address(esp, 0)); +++} +++ +++void InterpreterMacroAssembler::push_d(FloatRegister r) { +++ addi(esp, esp, -2 * wordSize); +++ fsd(r, Address(esp, 0)); +++} +++ +++void InterpreterMacroAssembler::pop(TosState state) { +++ switch (state) { +++ case atos: +++ pop_ptr(); +++ verify_oop(x10); +++ break; +++ case btos: // fall through +++ case ztos: // fall through +++ case ctos: // fall through +++ case stos: // fall through +++ case itos: +++ pop_i(); +++ break; +++ case ltos: +++ pop_l(); +++ break; +++ case ftos: +++ pop_f(); +++ break; +++ case dtos: +++ pop_d(); +++ break; +++ case vtos: +++ /* nothing to do */ +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++} +++ +++void InterpreterMacroAssembler::push(TosState state) { +++ switch (state) { +++ case atos: +++ verify_oop(x10); +++ push_ptr(); +++ break; +++ case btos: // fall through +++ case ztos: // fall through +++ case ctos: // fall through +++ case stos: // fall through +++ case itos: +++ push_i(); +++ break; +++ case ltos: +++ push_l(); +++ break; +++ case ftos: +++ push_f(); +++ break; +++ case dtos: +++ push_d(); +++ break; +++ case vtos: +++ /* nothing to do */ +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++} +++ +++// Helpers for swap and dup +++void InterpreterMacroAssembler::load_ptr(int n, Register val) { +++ ld(val, Address(esp, Interpreter::expr_offset_in_bytes(n))); +++} +++ +++void InterpreterMacroAssembler::store_ptr(int n, Register val) { +++ sd(val, Address(esp, Interpreter::expr_offset_in_bytes(n))); +++} +++ +++void InterpreterMacroAssembler::load_float(Address src) { +++ flw(f10, src); +++} +++ +++void InterpreterMacroAssembler::load_double(Address src) { +++ fld(f10, src); +++} +++ +++void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() { +++ // set sender sp +++ mv(x30, sp); +++ // record last_sp +++ sd(esp, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize)); +++} +++ +++// Jump to from_interpreted entry of a call unless single stepping is possible 
+++// in this thread in which case we must call the i2i entry
+++void InterpreterMacroAssembler::jump_from_interpreted(Register method) {
+++  prepare_to_jump_from_interpreted();
+++  if (JvmtiExport::can_post_interpreter_events()) {
+++    Label run_compiled_code;
+++    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
+++    // compiled code in threads for which the event is enabled. Check here for
+++    // interp_only_mode if these events CAN be enabled.
+++    lwu(t0, Address(xthread, JavaThread::interp_only_mode_offset()));
+++    beqz(t0, run_compiled_code);
+++    ld(t0, Address(method, Method::interpreter_entry_offset()));
+++    jr(t0);
+++    bind(run_compiled_code);
+++  }
+++
+++  ld(t0, Address(method, Method::from_interpreted_offset()));
+++  jr(t0);
+++}
+++
+++// The following two routines provide a hook so that an implementation
+++// can schedule the dispatch in two parts. amd64 does not do this.
+++void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
+++}
+++
+++void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
+++  dispatch_next(state, step);
+++}
+++
+++void InterpreterMacroAssembler::dispatch_base(TosState state,
+++                                              address* table,
+++                                              bool verifyoop,
+++                                              bool generate_poll,
+++                                              Register Rs) {
+++  // Note: the argument Rs holds the dispatch index and defaults to t0.
+++  if (VerifyActivationFrameSize) {
+++    Unimplemented();
+++  }
+++  if (verifyoop && state == atos) {
+++    verify_oop(x10);
+++  }
+++
+++  Label safepoint;
+++  address* const safepoint_table = Interpreter::safept_table(state);
+++  bool needs_thread_local_poll = generate_poll && table != safepoint_table;
+++
+++  if (needs_thread_local_poll) {
+++    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
+++    ld(t1, Address(xthread, JavaThread::polling_word_offset()));
+++    test_bit(t1, t1, exact_log2(SafepointMechanism::poll_bit()));
+++    bnez(t1, safepoint);
+++  }
+++  if (table == Interpreter::dispatch_table(state)) {
+++    mv(t1, Interpreter::distance_from_dispatch_table(state));
+++    add(t1, Rs, t1);
+++    shadd(t1, t1, xdispatch, t1, 3);
+++  } else {
+++    mv(t1, (address)table);
+++    shadd(t1, Rs, t1, Rs, 3);
+++  }
+++  ld(t1, Address(t1));
+++  jr(t1);
+++
+++  if (needs_thread_local_poll) {
+++    bind(safepoint);
+++    la(t1, ExternalAddress((address)safepoint_table));
+++    shadd(t1, Rs, t1, Rs, 3);
+++    ld(t1, Address(t1));
+++    jr(t1);
+++  }
+++}
+++
+++void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll, Register Rs) {
+++  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll, Rs);
+++}
+++
+++void InterpreterMacroAssembler::dispatch_only_normal(TosState state, Register Rs) {
+++  dispatch_base(state, Interpreter::normal_table(state), Rs);
+++}
+++
+++void InterpreterMacroAssembler::dispatch_only_noverify(TosState state, Register Rs) {
+++  dispatch_base(state, Interpreter::normal_table(state), false, Rs);
+++}
+++
+++void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
+++  // load next bytecode
+++  load_unsigned_byte(t0, Address(xbcp, step));
+++  add(xbcp, xbcp, step);
+++  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
+++}
+++
+++void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
+++  // load current bytecode
+++  lbu(t0, Address(xbcp, 0));
+++  dispatch_base(state, table);
+++}
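+++
+++// Roughly, dispatch_next above does the following (an illustrative C-style
+++// sketch added for clarity, not part of the original source):
+++//
+++//   t0 = *(unsigned char *)(xbcp + step);   // load next bytecode
+++//   xbcp += step;                           // advance bytecode pointer
+++//   goto *table_for(state)[t0];             // table entries are 8 bytes wide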
+++
+++// remove activation
+++//
+++// Apply stack watermark barrier.
+++// Unlock the receiver if this is a synchronized method.
+++// Unlock any Java monitors from synchronized blocks.
+++// Remove the activation from the stack.
+++//
+++// If there are locked Java monitors
+++//    If throw_monitor_exception
+++//       throws IllegalMonitorStateException
+++//    Else if install_monitor_exception
+++//       installs IllegalMonitorStateException
+++//    Else
+++//       no error processing
+++void InterpreterMacroAssembler::remove_activation(
+++        TosState state,
+++        bool throw_monitor_exception,
+++        bool install_monitor_exception,
+++        bool notify_jvmdi) {
+++  // Note: register x13 may be in use for the result check if this is
+++  // a synchronized method
+++  Label unlocked, unlock, no_unlock;
+++
+++  // The below poll is for the stack watermark barrier. It allows fixing up frames lazily,
+++  // which would otherwise not be safe to use. Such bad returns into unsafe territory of
+++  // the stack will call InterpreterRuntime::at_unwind.
+++  Label slow_path;
+++  Label fast_path;
+++  safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
+++  j(fast_path);
+++
+++  bind(slow_path);
+++  push(state);
+++  set_last_Java_frame(esp, fp, (address)pc(), t0);
+++  super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), xthread);
+++  reset_last_Java_frame(true);
+++  pop(state);
+++
+++  bind(fast_path);
+++
+++  // get the value of _do_not_unlock_if_synchronized into x13
+++  const Address do_not_unlock_if_synchronized(xthread,
+++    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
+++  lbu(x13, do_not_unlock_if_synchronized);
+++  sb(zr, do_not_unlock_if_synchronized); // reset the flag
+++
+++  // get method access flags
+++  ld(x11, Address(fp, frame::interpreter_frame_method_offset * wordSize));
+++  ld(x12, Address(x11, Method::access_flags_offset()));
+++  test_bit(t0, x12, exact_log2(JVM_ACC_SYNCHRONIZED));
+++  beqz(t0, unlocked);
+++
+++  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
+++  // is set.
+++  bnez(x13, no_unlock);
+++
+++  // unlock monitor
+++  push(state); // save result
+++
+++  // BasicObjectLock will be first in list, since this is a
+++  // synchronized method. However, need to check that the object has
+++  // not been unlocked by an explicit monitorexit bytecode.
+++  const Address monitor(fp, frame::interpreter_frame_initial_sp_offset *
+++                            wordSize - (int) sizeof(BasicObjectLock));
+++  // We use c_rarg1 so that if we go slow path it will be the correct
+++  // register for unlock_object to pass to VM directly
+++  la(c_rarg1, monitor); // address of first monitor
+++
+++  ld(x10, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
+++  bnez(x10, unlock);
+++
+++  pop(state);
+++  if (throw_monitor_exception) {
+++    // Entry already unlocked, need to throw exception
+++    call_VM(noreg, CAST_FROM_FN_PTR(address,
+++                                    InterpreterRuntime::throw_illegal_monitor_state_exception));
+++    should_not_reach_here();
+++  } else {
+++    // Monitor already unlocked during a stack unroll. If requested,
+++    // install an illegal_monitor_state_exception. Continue with
+++    // stack unrolling.
+++    if (install_monitor_exception) {
+++      call_VM(noreg, CAST_FROM_FN_PTR(address,
+++                                      InterpreterRuntime::new_illegal_monitor_state_exception));
+++    }
+++    j(unlocked);
+++  }
+++
+++  bind(unlock);
+++  unlock_object(c_rarg1);
+++  pop(state);
+++
+++  // Check for block-structured locking (i.e., that all locked
+++  // objects have been unlocked)
+++  bind(unlocked);
+++
+++  // x10: Might contain return value
+++
+++  // Check that all monitors are unlocked
+++  {
+++    Label loop, exception, entry, restart;
+++    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+++    const Address monitor_block_top(
+++      fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
+++    const Address monitor_block_bot(
+++      fp, frame::interpreter_frame_initial_sp_offset * wordSize);
+++
+++    bind(restart);
+++    // We use c_rarg1 so that if we go slow path it will be the correct
+++    // register for unlock_object to pass to VM directly
+++    ld(c_rarg1, monitor_block_top); // points to current entry, starting
+++                                    // with top-most entry
+++    la(x9, monitor_block_bot);      // points to word before bottom of
+++                                    // monitor block
+++
+++    j(entry);
+++
+++    // Entry already locked, need to throw exception
+++    bind(exception);
+++
+++    if (throw_monitor_exception) {
+++      // Throw exception
+++      MacroAssembler::call_VM(noreg,
+++                              CAST_FROM_FN_PTR(address, InterpreterRuntime::
+++                                               throw_illegal_monitor_state_exception));
+++
+++      should_not_reach_here();
+++    } else {
+++      // Stack unrolling. Unlock object and install illegal_monitor_exception.
+++      // Unlock does not block, so don't have to worry about the frame.
+++      // We don't have to preserve c_rarg1 since we are going to throw an exception.
+++
+++      push(state);
+++      unlock_object(c_rarg1);
+++      pop(state);
+++
+++      if (install_monitor_exception) {
+++        call_VM(noreg, CAST_FROM_FN_PTR(address,
+++                                        InterpreterRuntime::
+++                                        new_illegal_monitor_state_exception));
+++      }
+++
+++      j(restart);
+++    }
+++
+++    bind(loop);
+++    // check if current entry is used
+++    add(t0, c_rarg1, BasicObjectLock::obj_offset_in_bytes());
+++    ld(t0, Address(t0, 0));
+++    bnez(t0, exception);
+++
+++    add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
+++    bind(entry);
+++    bne(c_rarg1, x9, loop); // check if bottom reached; if not at bottom then check this entry
+++  }
+++
+++  bind(no_unlock);
+++
+++  // jvmti support
+++  if (notify_jvmdi) {
+++    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
+++  } else {
+++    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
+++  }
+++
+++  // remove activation
+++  // get sender esp
+++  ld(t1,
+++     Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize));
+++  if (StackReservedPages > 0) {
+++    // testing if reserved zone needs to be re-enabled
+++    Label no_reserved_zone_enabling;
+++
+++    ld(t0, Address(xthread, JavaThread::reserved_stack_activation_offset()));
+++    ble(t1, t0, no_reserved_zone_enabling);
+++
+++    call_VM_leaf(
+++      CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), xthread);
+++    call_VM(noreg, CAST_FROM_FN_PTR(address,
+++                                    InterpreterRuntime::throw_delayed_StackOverflowError));
+++    should_not_reach_here();
+++
+++    bind(no_reserved_zone_enabling);
+++  }
+++
+++  // restore sender esp
+++  mv(esp, t1);
+++
+++  // remove frame anchor
+++  leave();
+++  // If we're returning to interpreted code we will shortly be
+++  // adjusting SP to allow some space for ESP. If we're returning to
+++  // compiled code the saved sender SP was saved in sender_sp, so this
+++  // restores it.
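+++  // (Added note: the RISC-V psABI keeps the stack pointer 16-byte aligned
+++  // across calls, which is why the sender esp is rounded down with the
+++  // -16 mask below.)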
+++  andi(sp, esp, -16);
+++}
+++
+++// Lock object
+++//
+++// Args:
+++//      c_rarg1: BasicObjectLock to be used for locking
+++//
+++// Kills:
+++//      x10
+++//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
+++//      t0, t1 (temp regs)
+++void InterpreterMacroAssembler::lock_object(Register lock_reg)
+++{
+++  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
+++  if (UseHeavyMonitors) {
+++    call_VM(noreg,
+++            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
+++            lock_reg);
+++  } else {
+++    Label done;
+++
+++    const Register swap_reg = x10;
+++    const Register tmp = c_rarg2;
+++    const Register obj_reg = c_rarg3; // Will contain the oop
+++
+++    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
+++    const int lock_offset = BasicObjectLock::lock_offset_in_bytes();
+++    const int mark_offset = lock_offset +
+++                            BasicLock::displaced_header_offset_in_bytes();
+++
+++    Label slow_case;
+++
+++    // Load object pointer into obj_reg c_rarg3
+++    ld(obj_reg, Address(lock_reg, obj_offset));
+++
+++    if (DiagnoseSyncOnValueBasedClasses != 0) {
+++      load_klass(tmp, obj_reg);
+++      lwu(tmp, Address(tmp, Klass::access_flags_offset()));
+++      test_bit(tmp, tmp, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
+++      bnez(tmp, slow_case);
+++    }
+++
+++    if (UseBiasedLocking) {
+++      biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, done, &slow_case);
+++    }
+++
+++    // Load (object->mark() | 1) into swap_reg
+++    ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+++    ori(swap_reg, t0, 1);
+++
+++    // Save (object->mark() | 1) into BasicLock's displaced header
+++    sd(swap_reg, Address(lock_reg, mark_offset));
+++
+++    assert(lock_offset == 0,
+++           "displaced header must be first word in BasicObjectLock");
+++
+++    cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, t0, done, /*fallthrough*/NULL);
+++
+++    // Test if the oopMark is an obvious stack pointer, i.e.,
+++    //  1) (mark & 7) == 0, and
+++    //  2) sp <= mark < sp + os::vm_page_size()
+++    //
+++    // These 3 tests can be done by evaluating the following
+++    // expression: ((mark - sp) & (7 - os::vm_page_size())),
+++    // assuming both the stack pointer and the page size have their
+++    // least significant 3 bits clear.
+++    // NOTE: the oopMark is in swap_reg x10 as the result of cmpxchg
+++    sub(swap_reg, swap_reg, sp);
+++    mv(t0, (int64_t)(7 - os::vm_page_size()));
+++    andr(swap_reg, swap_reg, t0);
+++
+++    // Save the test result; for the recursive case, the result is zero
+++    sd(swap_reg, Address(lock_reg, mark_offset));
+++    beqz(swap_reg, done);
+++
+++    bind(slow_case);
+++
+++    // Call the runtime routine for slow case
+++    call_VM(noreg,
+++            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
+++            lock_reg);
+++
+++    bind(done);
+++  }
+++}
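+++
+++// A worked illustration of the stack-lock test in lock_object above (added
+++// for clarity, not part of the original source), assuming a 4096-byte page:
+++//   7 - 4096 == 0xffff'ffff'ffff'f007, so the andr keeps the low 3 bits and
+++//   all bits at or above the page size. The result is zero exactly when
+++//   mark - sp is 8-byte aligned and 0 <= mark - sp < 4096, i.e. when the
+++//   displaced mark is a pointer into the current stack page (the
+++//   recursive-lock case).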
It must be rarg1"); +++ +++ if (UseHeavyMonitors) { +++ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); +++ } else { +++ Label done; +++ +++ const Register swap_reg = x10; +++ const Register header_reg = c_rarg2; // Will contain the old oopMark +++ const Register obj_reg = c_rarg3; // Will contain the oop +++ +++ save_bcp(); // Save in case of exception +++ +++ // Convert from BasicObjectLock structure to object and BasicLock +++ // structure Store the BasicLock address into x10 +++ la(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes())); +++ +++ // Load oop into obj_reg(c_rarg3) +++ ld(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); +++ +++ // Free entry +++ sd(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); +++ +++ if (UseBiasedLocking) { +++ biased_locking_exit(obj_reg, header_reg, done); +++ } +++ +++ // Load the old header from BasicLock structure +++ ld(header_reg, Address(swap_reg, +++ BasicLock::displaced_header_offset_in_bytes())); +++ +++ // Test for recursion +++ beqz(header_reg, done); +++ +++ // Atomic swap back the old header +++ cmpxchg_obj_header(swap_reg, header_reg, obj_reg, t0, done, /*fallthrough*/NULL); +++ +++ // Call the runtime routine for slow case. +++ sd(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj +++ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); +++ +++ bind(done); +++ +++ restore_bcp(); +++ } +++} +++ +++ +++void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, +++ Label& zero_continue) { +++ assert(ProfileInterpreter, "must be profiling interpreter"); +++ ld(mdp, Address(fp, frame::interpreter_frame_mdp_offset * wordSize)); +++ beqz(mdp, zero_continue); +++} +++ +++// Set the method data pointer for the current bcp. +++void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { +++ assert(ProfileInterpreter, "must be profiling interpreter"); +++ Label set_mdp; +++ push_reg(RegSet::of(x10, x11), sp); // save x10, x11 +++ +++ // Test MDO to avoid the call if it is NULL. +++ ld(x10, Address(xmethod, in_bytes(Method::method_data_offset()))); +++ beqz(x10, set_mdp); +++ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), xmethod, xbcp); +++ // x10: mdi +++ // mdo is guaranteed to be non-zero here, we checked for it before the call. +++ ld(x11, Address(xmethod, in_bytes(Method::method_data_offset()))); +++ la(x11, Address(x11, in_bytes(MethodData::data_offset()))); +++ add(x10, x11, x10); +++ sd(x10, Address(fp, frame::interpreter_frame_mdp_offset * wordSize)); +++ bind(set_mdp); +++ pop_reg(RegSet::of(x10, x11), sp); +++} +++ +++void InterpreterMacroAssembler::verify_method_data_pointer() { +++ assert(ProfileInterpreter, "must be profiling interpreter"); +++#ifdef ASSERT +++ Label verify_continue; +++ add(sp, sp, -4 * wordSize); +++ sd(x10, Address(sp, 0)); +++ sd(x11, Address(sp, wordSize)); +++ sd(x12, Address(sp, 2 * wordSize)); +++ sd(x13, Address(sp, 3 * wordSize)); +++ test_method_data_pointer(x13, verify_continue); // If mdp is zero, continue +++ get_method(x11); +++ +++ // If the mdp is valid, it will point to a DataLayout header which is +++ // consistent with the bcp. The converse is highly probable also. 
+++  lh(x12, Address(x13, in_bytes(DataLayout::bci_offset())));
+++  ld(t0, Address(x11, Method::const_offset()));
+++  add(x12, x12, t0);
+++  la(x12, Address(x12, ConstMethod::codes_offset()));
+++  beq(x12, xbcp, verify_continue);
+++  // x11: method
+++  // xbcp: bcp // xbcp == 22
+++  // x13: mdp
+++  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
+++               x11, xbcp, x13);
+++  bind(verify_continue);
+++  ld(x10, Address(sp, 0));
+++  ld(x11, Address(sp, wordSize));
+++  ld(x12, Address(sp, 2 * wordSize));
+++  ld(x13, Address(sp, 3 * wordSize));
+++  add(sp, sp, 4 * wordSize);
+++#endif // ASSERT
+++}
+++
+++
+++void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
+++                                                int constant,
+++                                                Register value) {
+++  assert(ProfileInterpreter, "must be profiling interpreter");
+++  Address data(mdp_in, constant);
+++  sd(value, data);
+++}
+++
+++
+++void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
+++                                                      int constant,
+++                                                      bool decrement) {
+++  increment_mdp_data_at(mdp_in, noreg, constant, decrement);
+++}
+++
+++void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
+++                                                      Register reg,
+++                                                      int constant,
+++                                                      bool decrement) {
+++  assert(ProfileInterpreter, "must be profiling interpreter");
+++  // %%% this does 64-bit counters; at best it is wasting space,
+++  // at worst it is a rare bug when counters overflow
+++
+++  assert_different_registers(t1, t0, mdp_in, reg);
+++
+++  Address addr1(mdp_in, constant);
+++  Address addr2(t1, 0);
+++  Address &addr = addr1;
+++  if (reg != noreg) {
+++    la(t1, addr1);
+++    add(t1, t1, reg);
+++    addr = addr2;
+++  }
+++
+++  if (decrement) {
+++    ld(t0, addr);
+++    addi(t0, t0, -DataLayout::counter_increment);
+++    Label L;
+++    bltz(t0, L);      // skip store if counter underflow
+++    sd(t0, addr);
+++    bind(L);
+++  } else {
+++    assert(DataLayout::counter_increment == 1,
+++           "flow-free idiom only works with 1");
+++    ld(t0, addr);
+++    addi(t0, t0, DataLayout::counter_increment);
+++    Label L;
+++    blez(t0, L);      // skip store if counter overflow
+++    sd(t0, addr);
+++    bind(L);
+++  }
+++}
+++
+++void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
+++                                                int flag_byte_constant) {
+++  assert(ProfileInterpreter, "must be profiling interpreter");
+++  int flags_offset = in_bytes(DataLayout::flags_offset());
+++  // Set the flag
+++  lbu(t1, Address(mdp_in, flags_offset));
+++  ori(t1, t1, flag_byte_constant);
+++  sb(t1, Address(mdp_in, flags_offset));
+++}
+++
+++
+++void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
+++                                                 int offset,
+++                                                 Register value,
+++                                                 Register test_value_out,
+++                                                 Label& not_equal_continue) {
+++  assert(ProfileInterpreter, "must be profiling interpreter");
+++  if (test_value_out == noreg) {
+++    ld(t1, Address(mdp_in, offset));
+++    bne(value, t1, not_equal_continue);
+++  } else {
+++    // Put the test value into a register, so caller can use it:
+++    ld(test_value_out, Address(mdp_in, offset));
+++    bne(value, test_value_out, not_equal_continue);
+++  }
+++}
+++
+++
+++void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
+++                                                     int offset_of_disp) {
+++  assert(ProfileInterpreter, "must be profiling interpreter");
+++  ld(t1, Address(mdp_in, offset_of_disp));
+++  add(mdp_in, mdp_in, t1);
+++  sd(mdp_in, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
+++}
+++
+++void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
+++                                                     Register reg,
+++                                                     int offset_of_disp) {
+++  assert(ProfileInterpreter, "must be profiling interpreter");
+++  add(t1, mdp_in, reg);
+++  ld(t1, Address(t1, offset_of_disp));
+++  add(mdp_in, mdp_in, t1);
+++  sd(mdp_in, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
+++}
+++
+++
+++void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
+++                                                       int constant) {
+++  assert(ProfileInterpreter, "must be profiling interpreter");
+++  addi(mdp_in, mdp_in, (unsigned)constant);
+++  sd(mdp_in, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
+++}
+++
+++
+++void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
+++  assert(ProfileInterpreter, "must be profiling interpreter");
+++
+++  // save/restore across call_VM
+++  addi(sp, sp, -2 * wordSize);
+++  sd(zr, Address(sp, 0));
+++  sd(return_bci, Address(sp, wordSize));
+++  call_VM(noreg,
+++          CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
+++          return_bci);
+++  ld(zr, Address(sp, 0));
+++  ld(return_bci, Address(sp, wordSize));
+++  addi(sp, sp, 2 * wordSize);
+++}
+++
+++void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
+++                                                     Register bumped_count) {
+++  if (ProfileInterpreter) {
+++    Label profile_continue;
+++
+++    // If no method data exists, go to profile_continue.
+++    // Otherwise, assign to mdp
+++    test_method_data_pointer(mdp, profile_continue);
+++
+++    // We are taking a branch. Increment the taken count.
+++    Address data(mdp, in_bytes(JumpData::taken_offset()));
+++    ld(bumped_count, data);
+++    assert(DataLayout::counter_increment == 1,
+++           "flow-free idiom only works with 1");
+++    addi(bumped_count, bumped_count, DataLayout::counter_increment);
+++    Label L;
+++    // e.g. bumped_count = 0x7fff'ffff'ffff'ffff; adding 1 makes it negative, so we use <= 0
+++    blez(bumped_count, L); // skip store if counter overflow
+++    sd(bumped_count, data);
+++    bind(L);
+++    // The method data pointer needs to be updated to reflect the new target.
+++    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
+++    bind(profile_continue);
+++  }
+++}
+++
+++void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
+++  if (ProfileInterpreter) {
+++    Label profile_continue;
+++
+++    // If no method data exists, go to profile_continue.
+++    test_method_data_pointer(mdp, profile_continue);
+++
+++    // We are not taking a branch. Increment the not taken count.
+++    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
+++
+++    // The method data pointer needs to be updated to correspond to
+++    // the next bytecode
+++    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
+++    bind(profile_continue);
+++  }
+++}
+++
+++void InterpreterMacroAssembler::profile_call(Register mdp) {
+++  if (ProfileInterpreter) {
+++    Label profile_continue;
+++
+++    // If no method data exists, go to profile_continue.
+++    test_method_data_pointer(mdp, profile_continue);
+++
+++    // We are making a call. Increment the count.
+++    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+++
+++    // The method data pointer needs to be updated to reflect the new target.
+++    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
+++    bind(profile_continue);
+++  }
+++}
+++
+++void InterpreterMacroAssembler::profile_final_call(Register mdp) {
+++  if (ProfileInterpreter) {
+++    Label profile_continue;
+++
+++    // If no method data exists, go to profile_continue.
+++    test_method_data_pointer(mdp, profile_continue);
+++
+++    // We are making a call. Increment the count.
+++    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+++
+++    // The method data pointer needs to be updated to reflect the new target.
+++    update_mdp_by_constant(mdp,
+++                           in_bytes(VirtualCallData::
+++                                    virtual_call_data_size()));
+++    bind(profile_continue);
+++  }
+++}
+++
+++
+++void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
+++                                                     Register mdp,
+++                                                     Register reg2,
+++                                                     bool receiver_can_be_null) {
+++  if (ProfileInterpreter) {
+++    Label profile_continue;
+++
+++    // If no method data exists, go to profile_continue.
+++    test_method_data_pointer(mdp, profile_continue);
+++
+++    Label skip_receiver_profile;
+++    if (receiver_can_be_null) {
+++      Label not_null;
+++      bnez(receiver, not_null); // non-null receiver: profile its type below
+++      // We are making a call. Increment the count for null receiver.
+++      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+++      j(skip_receiver_profile);
+++      bind(not_null);
+++    }
+++
+++    // Record the receiver type.
+++    record_klass_in_profile(receiver, mdp, reg2, true);
+++    bind(skip_receiver_profile);
+++
+++    // The method data pointer needs to be updated to reflect the new target.
+++    update_mdp_by_constant(mdp,
+++                           in_bytes(VirtualCallData::
+++                                    virtual_call_data_size()));
+++    bind(profile_continue);
+++  }
+++}
+++
+++// This routine creates a state machine for updating the multi-row
+++// type profile at a virtual call site (or other type-sensitive bytecode).
+++// The machine visits each row (of receiver/count) until the receiver type
+++// is found, or until it runs out of rows. At the same time, it remembers
+++// the location of the first empty row. (An empty row records null for its
+++// receiver, and can be allocated for a newly-observed receiver type.)
+++// Because there are two degrees of freedom in the state, a simple linear
+++// search will not work; it must be a decision tree. Hence this helper
+++// function is recursive, to generate the required tree structured code.
+++// It's the interpreter, so we are trading off code space for speed.
+++// See below for example code.
+++void InterpreterMacroAssembler::record_klass_in_profile_helper(
+++  Register receiver, Register mdp,
+++  Register reg2,
+++  Label& done, bool is_virtual_call) {
+++  if (TypeProfileWidth == 0) {
+++    if (is_virtual_call) {
+++      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+++    }
+++
+++  } else {
+++    int non_profiled_offset = -1;
+++    if (is_virtual_call) {
+++      non_profiled_offset = in_bytes(CounterData::count_offset());
+++    }
+++
+++    record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
+++                                  &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset, non_profiled_offset);
+++  }
+++}
+++
+++void InterpreterMacroAssembler::record_item_in_profile_helper(
+++  Register item, Register mdp, Register reg2, int start_row, Label& done, int total_rows,
+++  OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn, int non_profiled_offset) {
+++  int last_row = total_rows - 1;
+++  assert(start_row <= last_row, "must be work left to do");
+++  // Test this row for both the item and for null.
+++  // Take any of three different outcomes:
+++  //   1. found item => increment count and goto done
+++  //   2. found null => keep looking for case 1, maybe allocate this cell
+++  //   3. found something else => keep looking for cases 1 and 2
+++  // Case 3 is handled by a recursive call.
+++  for (int row = start_row; row <= last_row; row++) {
+++    Label next_test;
+++    bool test_for_null_also = (row == start_row);
+++
+++    // See if the item is item[n].
+++    int item_offset = in_bytes(item_offset_fn(row));
+++    test_mdp_data_at(mdp, item_offset, item,
+++                     (test_for_null_also ? reg2 : noreg),
+++                     next_test);
+++    // (Reg2 now contains the item from the CallData.)
+++
+++    // The item is item[n]. Increment count[n].
+++    int count_offset = in_bytes(item_count_offset_fn(row));
+++    increment_mdp_data_at(mdp, count_offset);
+++    j(done);
+++    bind(next_test);
+++
+++    if (test_for_null_also) {
+++      Label found_null;
+++      // Failed the equality check on item[n]... Test for null.
+++      if (start_row == last_row) {
+++        // The only thing left to do is handle the null case.
+++        if (non_profiled_offset >= 0) {
+++          beqz(reg2, found_null);
+++          // Item did not match any saved item and there is no empty row for it.
+++          // Increment total counter to indicate polymorphic case.
+++          increment_mdp_data_at(mdp, non_profiled_offset);
+++          j(done);
+++          bind(found_null);
+++        } else {
+++          bnez(reg2, done);
+++        }
+++        break;
+++      }
+++      // Since null is rare, make it the branch-taken case.
+++      beqz(reg2, found_null);
+++
+++      // Put all the "Case 3" tests here.
+++      record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
+++                                    item_offset_fn, item_count_offset_fn, non_profiled_offset);
+++
+++      // Found a null. Keep searching for a matching item,
+++      // but remember that this is an empty (unused) slot.
+++      bind(found_null);
+++    }
+++  }
+++
+++  // In the fall-through case, we found no matching item, but we
+++  // observed that item[start_row] is NULL.
+++  // Fill in the item field and increment the count.
+++  int item_offset = in_bytes(item_offset_fn(start_row));
+++  set_mdp_data_at(mdp, item_offset, item);
+++  int count_offset = in_bytes(item_count_offset_fn(start_row));
+++  mv(reg2, DataLayout::counter_increment);
+++  set_mdp_data_at(mdp, count_offset, reg2);
+++  if (start_row > 0) {
+++    j(done);
+++  }
+++}
+++
+++// Example state machine code for three profile rows:
+++//   # main copy of decision tree, rooted at row[0]
+++//   if (row[0].rec == rec) then [
+++//     row[0].incr()
+++//     goto done
+++//   ]
+++//   if (row[0].rec != NULL) then [
+++//     # inner copy of decision tree, rooted at row[1]
+++//     if (row[1].rec == rec) then [
+++//       row[1].incr()
+++//       goto done
+++//     ]
+++//     if (row[1].rec != NULL) then [
+++//       # degenerate decision tree, rooted at row[2]
+++//       if (row[2].rec == rec) then [
+++//         row[2].incr()
+++//         goto done
+++//       ]
+++//       if (row[2].rec != NULL) then [
+++//         count.incr()
+++//         goto done
+++//       ] # overflow
+++//       row[2].init(rec)
+++//       goto done
+++//     ] else [
+++//       # remember row[1] is empty
+++//       if (row[2].rec == rec) then [
+++//         row[2].incr()
+++//         goto done
+++//       ]
+++//       row[1].init(rec)
+++//       goto done
+++//     ]
+++//   else [
+++//     # remember row[0] is empty
+++//     if (row[1].rec == rec) then [
+++//       row[1].incr()
+++//       goto done
+++//     ]
+++//     if (row[2].rec == rec) then [
+++//       row[2].incr()
+++//       goto done
+++//     ]
+++//     row[0].init(rec)
+++//     goto done
+++//   ]
+++//   done:
+++
+++void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
+++                                                        Register mdp, Register reg2,
+++                                                        bool is_virtual_call) {
+++  assert(ProfileInterpreter, "must be profiling");
+++  Label done;
+++
+++  record_klass_in_profile_helper(receiver, mdp, reg2, done, is_virtual_call);
+++
+++  bind(done);
+++}
+++
+++void InterpreterMacroAssembler::profile_ret(Register return_bci, Register mdp) {
+++  if (ProfileInterpreter) {
+++    Label profile_continue;
+++
+++    // If no method data exists, go to profile_continue.
+++ test_method_data_pointer(mdp, profile_continue); +++ +++ // Update the total ret count. +++ increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); +++ +++ for (uint row = 0; row < RetData::row_limit(); row++) { +++ Label next_test; +++ +++ // See if return_bci is equal to bci[n]: +++ test_mdp_data_at(mdp, +++ in_bytes(RetData::bci_offset(row)), +++ return_bci, noreg, +++ next_test); +++ +++ // return_bci is equal to bci[n]. Increment the count. +++ increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row))); +++ +++ // The method data pointer needs to be updated to reflect the new target. +++ update_mdp_by_offset(mdp, +++ in_bytes(RetData::bci_displacement_offset(row))); +++ j(profile_continue); +++ bind(next_test); +++ } +++ +++ update_mdp_for_ret(return_bci); +++ +++ bind(profile_continue); +++ } +++} +++ +++void InterpreterMacroAssembler::profile_null_seen(Register mdp) { +++ if (ProfileInterpreter) { +++ Label profile_continue; +++ +++ // If no method data exists, go to profile_continue. +++ test_method_data_pointer(mdp, profile_continue); +++ +++ set_mdp_flag_at(mdp, BitData::null_seen_byte_constant()); +++ +++ // The method data pointer needs to be updated. +++ int mdp_delta = in_bytes(BitData::bit_data_size()); +++ if (TypeProfileCasts) { +++ mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size()); +++ } +++ update_mdp_by_constant(mdp, mdp_delta); +++ +++ bind(profile_continue); +++ } +++} +++ +++void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) { +++ if (ProfileInterpreter && TypeProfileCasts) { +++ Label profile_continue; +++ +++ // If no method data exists, go to profile_continue. +++ test_method_data_pointer(mdp, profile_continue); +++ +++ int count_offset = in_bytes(CounterData::count_offset()); +++ // Back up the address, since we have already bumped the mdp. +++ count_offset -= in_bytes(VirtualCallData::virtual_call_data_size()); +++ +++ // *Decrement* the counter. We expect to see zero or small negatives. +++ increment_mdp_data_at(mdp, count_offset, true); +++ +++ bind (profile_continue); +++ } +++} +++ +++void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) { +++ if (ProfileInterpreter) { +++ Label profile_continue; +++ +++ // If no method data exists, go to profile_continue. +++ test_method_data_pointer(mdp, profile_continue); +++ +++ // The method data pointer needs to be updated. +++ int mdp_delta = in_bytes(BitData::bit_data_size()); +++ if (TypeProfileCasts) { +++ mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size()); +++ +++ // Record the object type. +++ record_klass_in_profile(klass, mdp, reg2, false); +++ } +++ update_mdp_by_constant(mdp, mdp_delta); +++ +++ bind(profile_continue); +++ } +++} +++ +++void InterpreterMacroAssembler::profile_switch_default(Register mdp) { +++ if (ProfileInterpreter) { +++ Label profile_continue; +++ +++ // If no method data exists, go to profile_continue. +++ test_method_data_pointer(mdp, profile_continue); +++ +++ // Update the default case count +++ increment_mdp_data_at(mdp, +++ in_bytes(MultiBranchData::default_count_offset())); +++ +++ // The method data pointer needs to be updated. 
+++    update_mdp_by_offset(mdp,
+++                         in_bytes(MultiBranchData::
+++                                  default_displacement_offset()));
+++
+++    bind(profile_continue);
+++  }
+++}
+++
+++void InterpreterMacroAssembler::profile_switch_case(Register index,
+++                                                    Register mdp,
+++                                                    Register reg2) {
+++  if (ProfileInterpreter) {
+++    Label profile_continue;
+++
+++    // If no method data exists, go to profile_continue.
+++    test_method_data_pointer(mdp, profile_continue);
+++
+++    // Build the base (index * per_case_size_in_bytes()) +
+++    // case_array_offset_in_bytes()
+++    mv(reg2, in_bytes(MultiBranchData::per_case_size()));
+++    mv(t0, in_bytes(MultiBranchData::case_array_offset()));
+++    Assembler::mul(index, index, reg2);
+++    Assembler::add(index, index, t0);
+++
+++    // Update the case count
+++    increment_mdp_data_at(mdp,
+++                          index,
+++                          in_bytes(MultiBranchData::relative_count_offset()));
+++
+++    // The method data pointer needs to be updated.
+++    update_mdp_by_offset(mdp,
+++                         index,
+++                         in_bytes(MultiBranchData::
+++                                  relative_displacement_offset()));
+++
+++    bind(profile_continue);
+++  }
+++}
+++
+++void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
+++
+++void InterpreterMacroAssembler::notify_method_entry() {
+++  // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
+++  // track stack depth. If it is possible to enter interp_only_mode we add
+++  // the code to check if the event should be sent.
+++  if (JvmtiExport::can_post_interpreter_events()) {
+++    Label L;
+++    lwu(x13, Address(xthread, JavaThread::interp_only_mode_offset()));
+++    beqz(x13, L);
+++    call_VM(noreg, CAST_FROM_FN_PTR(address,
+++                                    InterpreterRuntime::post_method_entry));
+++    bind(L);
+++  }
+++
+++  {
+++    SkipIfEqual skip(this, &DTraceMethodProbes, false);
+++    get_method(c_rarg1);
+++    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
+++                 xthread, c_rarg1);
+++  }
+++
+++  // RedefineClasses() tracing support for obsolete method entry
+++  if (log_is_enabled(Trace, redefine, class, obsolete)) {
+++    get_method(c_rarg1);
+++    call_VM_leaf(
+++      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
+++      xthread, c_rarg1);
+++  }
+++}
+++
+++
+++void InterpreterMacroAssembler::notify_method_exit(
+++    TosState state, NotifyMethodExitMode mode) {
+++  // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
+++  // track stack depth. If it is possible to enter interp_only_mode we add
+++  // the code to check if the event should be sent.
+++  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
+++    Label L;
+++    // Note: frame::interpreter_frame_result has a dependency on how the
+++    // method result is saved across the call to post_method_exit. If this
+++    // is changed then the interpreter_frame_result implementation will
+++    // need to be updated too.
+++
+++    // template interpreter will leave the result on the top of the stack.
+++    push(state);
+++    lwu(x13, Address(xthread, JavaThread::interp_only_mode_offset()));
+++    beqz(x13, L);
+++    call_VM(noreg,
+++            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
+++    bind(L);
+++    pop(state);
+++  }
+++
+++  {
+++    SkipIfEqual skip(this, &DTraceMethodProbes, false);
+++    push(state);
+++    get_method(c_rarg1);
+++    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
+++                 xthread, c_rarg1);
+++    pop(state);
+++  }
+++}
+++
+++
+++// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
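+++// An illustrative C restatement of the generated code below (added for
+++// clarity, not part of the original source):
+++//
+++//   uint32_t counter = *counter_addr + increment;
+++//   *counter_addr = counter;
+++//   if ((counter & mask) == 0) goto *where;   // mask is read from memory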
+++void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
+++                                                        int increment, Address mask,
+++                                                        Register tmp1, Register tmp2,
+++                                                        bool preloaded, Label* where) {
+++  Label done;
+++  if (!preloaded) {
+++    lwu(tmp1, counter_addr);
+++  }
+++  add(tmp1, tmp1, increment);
+++  sw(tmp1, counter_addr);
+++  lwu(tmp2, mask);
+++  andr(tmp1, tmp1, tmp2);
+++  bnez(tmp1, done);
+++  j(*where); // offset is too large so we have to use j instead of beqz here
+++  bind(done);
+++}
+++
+++void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
+++                                                  int number_of_arguments) {
+++  // interpreter specific
+++  //
+++  // Note: No need to save/restore rbcp & rlocals pointer since these
+++  //       are callee saved registers and no blocking/GC can happen
+++  //       in leaf calls.
+++#ifdef ASSERT
+++  {
+++    Label L;
+++    ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
+++    beqz(t0, L);
+++    stop("InterpreterMacroAssembler::call_VM_leaf_base:"
+++         " last_sp != NULL");
+++    bind(L);
+++  }
+++#endif /* ASSERT */
+++  // super call
+++  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
+++}
+++
+++void InterpreterMacroAssembler::call_VM_base(Register oop_result,
+++                                             Register java_thread,
+++                                             Register last_java_sp,
+++                                             address entry_point,
+++                                             int number_of_arguments,
+++                                             bool check_exceptions) {
+++  // interpreter specific
+++  //
+++  // Note: Could avoid restoring locals ptr (callee saved) - however doesn't
+++  //       really make a difference for these runtime calls, since they are
+++  //       slow anyway. Btw., bcp must be saved/restored since it may change
+++  //       due to GC.
+++  save_bcp();
+++#ifdef ASSERT
+++  {
+++    Label L;
+++    ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
+++    beqz(t0, L);
+++    stop("InterpreterMacroAssembler::call_VM_base:"
+++         " last_sp != NULL");
+++    bind(L);
+++  }
+++#endif /* ASSERT */
+++  // super call
+++  MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
+++                               entry_point, number_of_arguments,
+++                               check_exceptions);
+++  // interpreter specific
+++  restore_bcp();
+++  restore_locals();
+++}
+++
+++void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr, Register tmp) {
+++  assert_different_registers(obj, tmp, t0, mdo_addr.base());
+++  Label update, next, none;
+++
+++  verify_oop(obj);
+++
+++  bnez(obj, update);
+++  orptr(mdo_addr, TypeEntries::null_seen, t0, tmp);
+++  j(next);
+++
+++  bind(update);
+++  load_klass(obj, obj);
+++
+++  ld(t0, mdo_addr);
+++  xorr(obj, obj, t0);
+++  andi(t0, obj, TypeEntries::type_klass_mask);
+++  beqz(t0, next); // klass seen before, nothing to
+++                  // do. The unknown bit may have been
+++                  // set already but no need to check.
+++
+++  test_bit(t0, obj, exact_log2(TypeEntries::type_unknown));
+++  bnez(t0, next);
+++  // already unknown. Nothing to do anymore.
+++
+++  ld(t0, mdo_addr);
+++  beqz(t0, none);
+++  mv(tmp, (u1)TypeEntries::null_seen);
+++  beq(t0, tmp, none);
+++  // There is a chance that the checks above (re-reading profiling
+++  // data from memory) fail if another thread has just set the
+++  // profiling to this obj's klass
+++  ld(t0, mdo_addr);
+++  xorr(obj, obj, t0);
+++  andi(t0, obj, TypeEntries::type_klass_mask);
+++  beqz(t0, next);
+++
+++  // different from before. Cannot keep accurate profile.
+++  orptr(mdo_addr, TypeEntries::type_unknown, t0, tmp);
+++  j(next);
+++
+++  bind(none);
+++  // first time here. Set profile type.
+++  sd(obj, mdo_addr);
+++
+++  bind(next);
+++}
+++
+++void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
+++  if (!ProfileInterpreter) {
+++    return;
+++  }
+++
+++  if (MethodData::profile_arguments() || MethodData::profile_return()) {
+++    Label profile_continue;
+++
+++    test_method_data_pointer(mdp, profile_continue);
+++
+++    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
+++
+++    lbu(t0, Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start));
+++    if (is_virtual) {
+++      mv(tmp, (u1)DataLayout::virtual_call_type_data_tag);
+++      bne(t0, tmp, profile_continue);
+++    } else {
+++      mv(tmp, (u1)DataLayout::call_type_data_tag);
+++      bne(t0, tmp, profile_continue);
+++    }
+++
+++    // calculate slot step
+++    static int stack_slot_offset0 = in_bytes(TypeEntriesAtCall::stack_slot_offset(0));
+++    static int slot_step = in_bytes(TypeEntriesAtCall::stack_slot_offset(1)) - stack_slot_offset0;
+++
+++    // calculate type step
+++    static int argument_type_offset0 = in_bytes(TypeEntriesAtCall::argument_type_offset(0));
+++    static int type_step = in_bytes(TypeEntriesAtCall::argument_type_offset(1)) - argument_type_offset0;
+++
+++    if (MethodData::profile_arguments()) {
+++      Label done, loop, loopEnd, profileArgument, profileReturnType;
+++      RegSet pushed_registers;
+++      pushed_registers += x15;
+++      pushed_registers += x16;
+++      pushed_registers += x17;
+++      Register mdo_addr = x15;
+++      Register index = x16;
+++      Register off_to_args = x17;
+++      push_reg(pushed_registers, sp);
+++
+++      mv(off_to_args, in_bytes(TypeEntriesAtCall::args_data_offset()));
+++      mv(t0, TypeProfileArgsLimit);
+++      beqz(t0, loopEnd);
+++
+++      mv(index, zr); // index < TypeProfileArgsLimit
+++      bind(loop);
+++      bgtz(index, profileReturnType);
+++      mv(t0, (int)MethodData::profile_return());
+++      beqz(t0, profileArgument); // (index > 0 || MethodData::profile_return()) == false
+++      bind(profileReturnType);
+++      // If return value type is profiled we may have no argument to profile
+++      ld(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
+++      mv(t1, -TypeStackSlotEntries::per_arg_count());
+++      mul(t1, index, t1);
+++      add(tmp, tmp, t1);
+++      mv(t1, TypeStackSlotEntries::per_arg_count());
+++      add(t0, mdp, off_to_args);
+++      blt(tmp, t1, done);
+++
+++      bind(profileArgument);
+++
+++      ld(tmp, Address(callee, Method::const_offset()));
+++      load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
+++      // stack offset o (zero based) from the start of the argument
+++      // list, for n arguments translates into offset n - o - 1 from
+++      // the end of the argument list
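+++      // (Worked example, added for clarity: with n == 3 parameters, the
+++      // argument at stack slot o == 0 sits n - o - 1 == 2 slots from the
+++      // end of the argument list.)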
+++      mv(t0, stack_slot_offset0);
+++      mv(t1, slot_step);
+++      mul(t1, index, t1);
+++      add(t0, t0, t1);
+++      add(t0, mdp, t0);
+++      ld(t0, Address(t0));
+++      sub(tmp, tmp, t0);
+++      addi(tmp, tmp, -1);
+++      Address arg_addr = argument_address(tmp);
+++      ld(tmp, arg_addr);
+++
+++      mv(t0, argument_type_offset0);
+++      mv(t1, type_step);
+++      mul(t1, index, t1);
+++      add(t0, t0, t1);
+++      add(mdo_addr, mdp, t0);
+++      Address mdo_arg_addr(mdo_addr, 0);
+++      profile_obj_type(tmp, mdo_arg_addr, t1);
+++
+++      int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
+++      addi(off_to_args, off_to_args, to_add);
+++
+++      // increment index by 1
+++      addi(index, index, 1);
+++      mv(t1, TypeProfileArgsLimit);
+++      blt(index, t1, loop);
+++      bind(loopEnd);
+++
+++      if (MethodData::profile_return()) {
+++        ld(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
+++        addi(tmp, tmp, -TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
+++      }
+++
+++      add(t0, mdp, off_to_args);
+++      bind(done);
+++      mv(mdp, t0);
+++
+++      // unspill the clobbered registers
+++      pop_reg(pushed_registers, sp);
+++
+++      if (MethodData::profile_return()) {
+++        // We're right after the type profile for the last
+++        // argument. tmp is the number of cells left in the
+++        // CallTypeData/VirtualCallTypeData to reach its end. Non null
+++        // if there's a return to profile.
+++        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
+++        shadd(mdp, tmp, mdp, tmp, exact_log2(DataLayout::cell_size));
+++      }
+++      sd(mdp, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
+++    } else {
+++      assert(MethodData::profile_return(), "either profile call args or call ret");
+++      update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
+++    }
+++
+++    // mdp points right after the end of the
+++    // CallTypeData/VirtualCallTypeData, right after the cells for the
+++    // return value type if there's one
+++
+++    bind(profile_continue);
+++  }
+++}
+++
+++void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
+++  assert_different_registers(mdp, ret, tmp, xbcp, t0, t1);
+++  if (ProfileInterpreter && MethodData::profile_return()) {
+++    Label profile_continue, done;
+++
+++    test_method_data_pointer(mdp, profile_continue);
+++
+++    if (MethodData::profile_return_jsr292_only()) {
+++      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
+++
+++      // If we don't profile all invoke bytecodes we must make sure
+++      // it's a bytecode we indeed profile. We can't go back to the
+++      // beginning of the ProfileData we intend to update to check its
+++      // type because we're right after it and we don't know its
+++      // length
+++      Label do_profile;
+++      lbu(t0, Address(xbcp, 0));
+++      mv(tmp, (u1)Bytecodes::_invokedynamic);
+++      beq(t0, tmp, do_profile);
+++      mv(tmp, (u1)Bytecodes::_invokehandle);
+++      beq(t0, tmp, do_profile);
+++      get_method(tmp);
+++      lhu(t0, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
+++      mv(t1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
+++      bne(t0, t1, profile_continue);
+++      bind(do_profile);
+++    }
+++
+++    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
+++    mv(tmp, ret);
+++    profile_obj_type(tmp, mdo_ret_addr, t1);
+++
+++    bind(profile_continue);
+++  }
+++}
+++
+++void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2, Register tmp3) {
+++  assert_different_registers(t0, t1, mdp, tmp1, tmp2, tmp3);
+++  if (ProfileInterpreter && MethodData::profile_parameters()) {
+++    Label profile_continue, done;
+++
+++    test_method_data_pointer(mdp, profile_continue);
+++
+++    // Load the offset of the area within the MDO used for
+++    // parameters. If it's negative we're not profiling any parameters
+++    lwu(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
+++    srli(tmp2, tmp1, 31);
+++    bnez(tmp2, profile_continue); // i.e. sign bit set
+++
+++    // Compute a pointer to the area for parameters from the offset
+++    // and move the pointer to the slot for the last
+++    // parameters. Collect profiling from last parameter down.
+++ // mdo start + parameters offset + array length - 1 +++ add(mdp, mdp, tmp1); +++ ld(tmp1, Address(mdp, ArrayData::array_len_offset())); +++ add(tmp1, tmp1, - TypeStackSlotEntries::per_arg_count()); +++ +++ Label loop; +++ bind(loop); +++ +++ int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0)); +++ int type_base = in_bytes(ParametersTypeData::type_offset(0)); +++ int per_arg_scale = exact_log2(DataLayout::cell_size); +++ add(t0, mdp, off_base); +++ add(t1, mdp, type_base); +++ +++ shadd(tmp2, tmp1, t0, tmp2, per_arg_scale); +++ // load offset on the stack from the slot for this parameter +++ ld(tmp2, Address(tmp2, 0)); +++ neg(tmp2, tmp2); +++ +++ // read the parameter from the local area +++ shadd(tmp2, tmp2, xlocals, tmp2, Interpreter::logStackElementSize); +++ ld(tmp2, Address(tmp2, 0)); +++ +++ // profile the parameter +++ shadd(t1, tmp1, t1, t0, per_arg_scale); +++ Address arg_type(t1, 0); +++ profile_obj_type(tmp2, arg_type, tmp3); +++ +++ // go to next parameter +++ add(tmp1, tmp1, - TypeStackSlotEntries::per_arg_count()); +++ bgez(tmp1, loop); +++ +++ bind(profile_continue); +++ } +++} +++ +++void InterpreterMacroAssembler::get_method_counters(Register method, +++ Register mcs, Label& skip) { +++ Label has_counters; +++ ld(mcs, Address(method, Method::method_counters_offset())); +++ bnez(mcs, has_counters); +++ call_VM(noreg, CAST_FROM_FN_PTR(address, +++ InterpreterRuntime::build_method_counters), method); +++ ld(mcs, Address(method, Method::method_counters_offset())); +++ beqz(mcs, skip); // No MethodCounters allocated, OutOfMemory +++ bind(has_counters); +++} +++ +++#ifdef ASSERT +++void InterpreterMacroAssembler::verify_access_flags(Register access_flags, uint32_t flag, +++ const char* msg, bool stop_by_hit) { +++ Label L; +++ test_bit(t0, access_flags, exact_log2(flag)); +++ if (stop_by_hit) { +++ beqz(t0, L); +++ } else { +++ bnez(t0, L); +++ } +++ stop(msg); +++ bind(L); +++} +++ +++void InterpreterMacroAssembler::verify_frame_setup() { +++ Label L; +++ const Address monitor_block_top(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize); +++ ld(t0, monitor_block_top); +++ beq(esp, t0, L); +++ stop("broken stack frame setup in interpreter"); +++ bind(L); +++} +++#endif ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/interp_masm_riscv.hpp ++@@ -0,0 +1,285 @@ +++/* +++ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef CPU_RISCV_INTERP_MASM_RISCV_HPP
+++#define CPU_RISCV_INTERP_MASM_RISCV_HPP
+++
+++#include "asm/macroAssembler.hpp"
+++#include "interpreter/invocationCounter.hpp"
+++#include "runtime/frame.hpp"
+++
+++// This file specializes the assembler with interpreter-specific macros
+++
+++typedef ByteSize (*OffsetFunction)(uint);
+++
+++class InterpreterMacroAssembler: public MacroAssembler {
+++ protected:
+++  // Interpreter specific version of call_VM_base
+++  using MacroAssembler::call_VM_leaf_base;
+++
+++  virtual void call_VM_leaf_base(address entry_point,
+++                                 int number_of_arguments);
+++
+++  virtual void call_VM_base(Register oop_result,
+++                            Register java_thread,
+++                            Register last_java_sp,
+++                            address entry_point,
+++                            int number_of_arguments,
+++                            bool check_exceptions);
+++
+++  // base routine for all dispatches
+++  void dispatch_base(TosState state, address* table, bool verifyoop = true,
+++                     bool generate_poll = false, Register Rs = t0);
+++
+++ public:
+++  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
+++  virtual ~InterpreterMacroAssembler() {}
+++
+++  void load_earlyret_value(TosState state);
+++
+++  void jump_to_entry(address entry);
+++
+++  virtual void check_and_handle_popframe(Register java_thread);
+++  virtual void check_and_handle_earlyret(Register java_thread);
+++
+++  // Interpreter-specific registers
+++  void save_bcp() {
+++    sd(xbcp, Address(fp, frame::interpreter_frame_bcp_offset * wordSize));
+++  }
+++
+++  void restore_bcp() {
+++    ld(xbcp, Address(fp, frame::interpreter_frame_bcp_offset * wordSize));
+++  }
+++
+++  void restore_locals() {
+++    ld(xlocals, Address(fp, frame::interpreter_frame_locals_offset * wordSize));
+++  }
+++
+++  void restore_constant_pool_cache() {
+++    ld(xcpool, Address(fp, frame::interpreter_frame_cache_offset * wordSize));
+++  }
+++
+++  void get_dispatch();
+++
+++  // Helpers for runtime call arguments/results
+++  void get_method(Register reg) {
+++    ld(reg, Address(fp, frame::interpreter_frame_method_offset * wordSize));
+++  }
+++
+++  void get_const(Register reg) {
+++    get_method(reg);
+++    ld(reg, Address(reg, in_bytes(Method::const_offset())));
+++  }
+++
+++  void get_constant_pool(Register reg) {
+++    get_const(reg);
+++    ld(reg, Address(reg, in_bytes(ConstMethod::constants_offset())));
+++  }
+++
+++  void get_constant_pool_cache(Register reg) {
+++    get_constant_pool(reg);
+++    ld(reg, Address(reg, ConstantPool::cache_offset_in_bytes()));
+++  }
+++
+++  void get_cpool_and_tags(Register cpool, Register tags) {
+++    get_constant_pool(cpool);
+++    ld(tags, Address(cpool, ConstantPool::tags_offset_in_bytes()));
+++  }
+++
+++  void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
+++  void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
+++  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
+++  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
+++  void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
+++  void get_method_counters(Register method, Register mcs, Label& skip);
+++
+++  // Load cpool->resolved_references(index).
+++  void load_resolved_reference_at_index(Register result, Register index, Register tmp = x15);
+++
+++  // Load cpool->resolved_klass_at(index).
+++  void load_resolved_klass_at_offset(Register cpool, Register index, Register klass, Register temp);
+++
+++  void load_resolved_method_at_index(int byte_no, Register method, Register cache);
+++
+++  void pop_ptr(Register r = x10);
+++  void pop_i(Register r = x10);
+++  void pop_l(Register r = x10);
+++  void pop_f(FloatRegister r = f10);
+++  void pop_d(FloatRegister r = f10);
+++  void push_ptr(Register r = x10);
+++  void push_i(Register r = x10);
+++  void push_l(Register r = x10);
+++  void push_f(FloatRegister r = f10);
+++  void push_d(FloatRegister r = f10);
+++
+++  void pop(TosState state);  // transition vtos -> state
+++  void push(TosState state); // transition state -> vtos
+++
+++  void empty_expression_stack() {
+++    ld(esp, Address(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
+++    // NULL last_sp until next java call
+++    sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
+++  }
+++
+++  // Helpers for swap and dup
+++  void load_ptr(int n, Register val);
+++  void store_ptr(int n, Register val);
+++
+++  // Load float value from 'src'. The value is loaded onto the fp register f10.
+++  void load_float(Address src);
+++  void load_double(Address src);
+++
+++  // Generate a subtype check: branch to ok_is_subtype if sub_klass is
+++  // a subtype of super_klass.
+++  void gen_subtype_check( Register sub_klass, Label &ok_is_subtype );
+++
+++  // Dispatching
+++  void dispatch_prolog(TosState state, int step = 0);
+++  void dispatch_epilog(TosState state, int step = 0);
+++  // dispatch via t0
+++  void dispatch_only(TosState state, bool generate_poll = false, Register Rs = t0);
+++  // dispatch normal table via t0 (assume t0 is loaded already)
+++  void dispatch_only_normal(TosState state, Register Rs = t0);
+++  void dispatch_only_noverify(TosState state, Register Rs = t0);
+++  // load t0 from [xbcp + step] and dispatch via t0
+++  void dispatch_next(TosState state, int step = 0, bool generate_poll = false);
+++  // load t0 from [xbcp] and dispatch via t0 and table
+++  void dispatch_via (TosState state, address* table);
+++
+++  // jump to an invoked target
+++  void prepare_to_jump_from_interpreted();
+++  void jump_from_interpreted(Register method);
+++
+++
+++  // Returning from interpreted functions
+++  //
+++  // Removes the current activation (incl. unlocking of monitors)
+++  // and sets up the return address. This code is also used for
+++  // exception unwinding. In that case, we do not want to throw
+++  // IllegalMonitorStateExceptions, since that might get us into an
+++  // infinite rethrow exception loop.
+++  // Additionally this code is used for popFrame and earlyReturn.
+++  // In popFrame case we want to skip throwing an exception,
+++  // installing an exception, and notifying jvmdi.
+++  // In earlyReturn case we only want to skip throwing an exception
+++  // and installing an exception.
+++  void remove_activation(TosState state,
+++                         bool throw_monitor_exception = true,
+++                         bool install_monitor_exception = true,
+++                         bool notify_jvmdi = true);
+++
+++  // FIXME: Give us a valid frame at a null check.
+++ virtual void null_check(Register reg, int offset = -1) { +++ MacroAssembler::null_check(reg, offset); +++ } +++ +++ // Object locking +++ void lock_object (Register lock_reg); +++ void unlock_object(Register lock_reg); +++ +++ // Interpreter profiling operations +++ void set_method_data_pointer_for_bcp(); +++ void test_method_data_pointer(Register mdp, Label& zero_continue); +++ void verify_method_data_pointer(); +++ +++ void set_mdp_data_at(Register mdp_in, int constant, Register value); +++ void increment_mdp_data_at(Address data, bool decrement = false); +++ void increment_mdp_data_at(Register mdp_in, int constant, +++ bool decrement = false); +++ void increment_mdp_data_at(Register mdp_in, Register reg, int constant, +++ bool decrement = false); +++ void increment_mask_and_jump(Address counter_addr, +++ int increment, Address mask, +++ Register tmp1, Register tmp2, +++ bool preloaded, Label* where); +++ +++ void set_mdp_flag_at(Register mdp_in, int flag_constant); +++ void test_mdp_data_at(Register mdp_in, int offset, Register value, +++ Register test_value_out, +++ Label& not_equal_continue); +++ +++ void record_klass_in_profile(Register receiver, Register mdp, +++ Register reg2, bool is_virtual_call); +++ void record_klass_in_profile_helper(Register receiver, Register mdp, +++ Register reg2, +++ Label& done, bool is_virtual_call); +++ void record_item_in_profile_helper(Register item, Register mdp, +++ Register reg2, int start_row, Label& done, int total_rows, +++ OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn, +++ int non_profiled_offset); +++ +++ void update_mdp_by_offset(Register mdp_in, int offset_of_offset); +++ void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp); +++ void update_mdp_by_constant(Register mdp_in, int constant); +++ void update_mdp_for_ret(Register return_bci); +++ +++ // narrow int return value +++ void narrow(Register result); +++ +++ void profile_taken_branch(Register mdp, Register bumped_count); +++ void profile_not_taken_branch(Register mdp); +++ void profile_call(Register mdp); +++ void profile_final_call(Register mdp); +++ void profile_virtual_call(Register receiver, Register mdp, +++ Register t1, +++ bool receiver_can_be_null = false); +++ void profile_ret(Register return_bci, Register mdp); +++ void profile_null_seen(Register mdp); +++ void profile_typecheck(Register mdp, Register klass, Register temp); +++ void profile_typecheck_failed(Register mdp); +++ void profile_switch_default(Register mdp); +++ void profile_switch_case(Register index_in_scratch, Register mdp, +++ Register temp); +++ +++ void profile_obj_type(Register obj, const Address& mdo_addr, Register tmp); +++ void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual); +++ void profile_return_type(Register mdp, Register ret, Register tmp); +++ void profile_parameters_type(Register mdp, Register tmp1, Register tmp2, Register tmp3); +++ +++ // Debugging +++ // only if +VerifyFPU && (state == ftos || state == dtos) +++ void verify_FPU(int stack_depth, TosState state = ftos); +++ +++ typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode; +++ +++ // support for jvmti/dtrace +++ void notify_method_entry(); +++ void notify_method_exit(TosState state, NotifyMethodExitMode mode); +++ +++ virtual void _call_Unimplemented(address call_site) { +++ save_bcp(); +++ set_last_Java_frame(esp, fp, (address) pc(), t0); +++ MacroAssembler::_call_Unimplemented(call_site); +++ } +++ +++#ifdef ASSERT +++ void 
verify_access_flags(Register access_flags, uint32_t flag,
+++ const char* msg, bool stop_by_hit = true);
+++ void verify_frame_setup();
+++#endif
+++};
+++
+++#endif // CPU_RISCV_INTERP_MASM_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/interpreterRT_riscv.cpp
++@@ -0,0 +1,295 @@
+++/*
+++ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#include "precompiled.hpp"
+++#include "asm/macroAssembler.inline.hpp"
+++#include "interpreter/interp_masm.hpp"
+++#include "interpreter/interpreter.hpp"
+++#include "interpreter/interpreterRuntime.hpp"
+++#include "memory/allocation.inline.hpp"
+++#include "memory/universe.hpp"
+++#include "oops/method.hpp"
+++#include "oops/oop.inline.hpp"
+++#include "runtime/handles.inline.hpp"
+++#include "runtime/icache.hpp"
+++#include "runtime/interfaceSupport.inline.hpp"
+++#include "runtime/signature.hpp"
+++
+++#define __ _masm->
+++
+++// Implementation of SignatureHandlerGenerator
+++Register InterpreterRuntime::SignatureHandlerGenerator::from() { return xlocals; }
+++Register InterpreterRuntime::SignatureHandlerGenerator::to() { return sp; }
+++Register InterpreterRuntime::SignatureHandlerGenerator::temp() { return t0; }
+++
+++Register InterpreterRuntime::SignatureHandlerGenerator::next_gpr() {
+++ if (_num_reg_int_args < Argument::n_int_register_parameters_c - 1) {
+++ return g_INTArgReg[++_num_reg_int_args];
+++ }
+++ return noreg;
+++}
+++
+++FloatRegister InterpreterRuntime::SignatureHandlerGenerator::next_fpr() {
+++ if (_num_reg_fp_args < Argument::n_float_register_parameters_c) {
+++ return g_FPArgReg[_num_reg_fp_args++];
+++ } else {
+++ return fnoreg;
+++ }
+++}
+++
+++int InterpreterRuntime::SignatureHandlerGenerator::next_stack_offset() {
+++ int ret = _stack_offset;
+++ _stack_offset += wordSize;
+++ return ret;
+++}
+++
+++InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(
+++ const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+++ _masm = new MacroAssembler(buffer); // allocated on the resource area by default
+++ _num_reg_int_args = (method->is_static() ?
1 : 0); +++ _num_reg_fp_args = 0; +++ _stack_offset = 0; +++} +++ +++void InterpreterRuntime::SignatureHandlerGenerator::pass_int() { +++ const Address src(from(), Interpreter::local_offset_in_bytes(offset())); +++ +++ Register reg = next_gpr(); +++ if (reg != noreg) { +++ __ lw(reg, src); +++ } else { +++ __ lw(x10, src); +++ __ sw(x10, Address(to(), next_stack_offset())); +++ } +++} +++ +++void InterpreterRuntime::SignatureHandlerGenerator::pass_long() { +++ const Address src(from(), Interpreter::local_offset_in_bytes(offset() + 1)); +++ +++ Register reg = next_gpr(); +++ if (reg != noreg) { +++ __ ld(reg, src); +++ } else { +++ __ ld(x10, src); +++ __ sd(x10, Address(to(), next_stack_offset())); +++ } +++} +++ +++void InterpreterRuntime::SignatureHandlerGenerator::pass_float() { +++ const Address src(from(), Interpreter::local_offset_in_bytes(offset())); +++ +++ FloatRegister reg = next_fpr(); +++ if (reg != fnoreg) { +++ __ flw(reg, src); +++ } else { +++ // a floating-point argument is passed according to the integer calling +++ // convention if no floating-point argument register available +++ pass_int(); +++ } +++} +++ +++void InterpreterRuntime::SignatureHandlerGenerator::pass_double() { +++ const Address src(from(), Interpreter::local_offset_in_bytes(offset() + 1)); +++ +++ FloatRegister reg = next_fpr(); +++ if (reg != fnoreg) { +++ __ fld(reg, src); +++ } else { +++ // a floating-point argument is passed according to the integer calling +++ // convention if no floating-point argument register available +++ pass_long(); +++ } +++} +++ +++void InterpreterRuntime::SignatureHandlerGenerator::pass_object() { +++ Register reg = next_gpr(); +++ if (reg == c_rarg1) { +++ assert(offset() == 0, "argument register 1 can only be (non-null) receiver"); +++ __ addi(c_rarg1, from(), Interpreter::local_offset_in_bytes(offset())); +++ } else if (reg != noreg) { +++ // c_rarg2-c_rarg7 +++ __ addi(x10, from(), Interpreter::local_offset_in_bytes(offset())); +++ __ mv(reg, zr); //_num_reg_int_args:c_rarg -> 1:c_rarg2, 2:c_rarg3... 
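+++ // At this point x10 holds the address of the local slot and reg was
+++ // pre-set to zero above. JNI passes a non-null oop as a handle (the
+++ // slot address) and a null oop as NULL, so reg is only overwritten
+++ // when the slot is non-null. A sketch of the next four instructions:
+++ //
+++ //   reg = (*(intptr_t*)x10 != 0) ? x10 : NULL;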
+++ __ ld(temp(), x10); +++ Label L; +++ __ beqz(temp(), L); +++ __ mv(reg, x10); +++ __ bind(L); +++ } else { +++ //to stack +++ __ addi(x10, from(), Interpreter::local_offset_in_bytes(offset())); +++ __ ld(temp(), x10); +++ Label L; +++ __ bnez(temp(), L); +++ __ mv(x10, zr); +++ __ bind(L); +++ assert(sizeof(jobject) == wordSize, ""); +++ __ sd(x10, Address(to(), next_stack_offset())); +++ } +++} +++ +++void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) { +++ // generate code to handle arguments +++ iterate(fingerprint); +++ +++ // return result handler +++ __ la(x10, ExternalAddress(Interpreter::result_handler(method()->result_type()))); +++ __ ret(); +++ +++ __ flush(); +++} +++ +++ +++// Implementation of SignatureHandlerLibrary +++ +++void SignatureHandlerLibrary::pd_set_handler(address handler) {} +++ +++ +++class SlowSignatureHandler +++ : public NativeSignatureIterator { +++ private: +++ address _from; +++ intptr_t* _to; +++ intptr_t* _int_args; +++ intptr_t* _fp_args; +++ intptr_t* _fp_identifiers; +++ unsigned int _num_reg_int_args; +++ unsigned int _num_reg_fp_args; +++ +++ intptr_t* single_slot_addr() { +++ intptr_t* from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0)); +++ _from -= Interpreter::stackElementSize; +++ return from_addr; +++ } +++ +++ intptr_t* double_slot_addr() { +++ intptr_t* from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(1)); +++ _from -= 2 * Interpreter::stackElementSize; +++ return from_addr; +++ } +++ +++ int pass_gpr(intptr_t value) { +++ if (_num_reg_int_args < Argument::n_int_register_parameters_c - 1) { +++ *_int_args++ = value; +++ return _num_reg_int_args++; +++ } +++ return -1; +++ } +++ +++ int pass_fpr(intptr_t value) { +++ if (_num_reg_fp_args < Argument::n_float_register_parameters_c) { +++ *_fp_args++ = value; +++ return _num_reg_fp_args++; +++ } +++ return -1; +++ } +++ +++ void pass_stack(intptr_t value) { +++ *_to++ = value; +++ } +++ +++ virtual void pass_int() { +++ jint value = *(jint*)single_slot_addr(); +++ if (pass_gpr(value) < 0) { +++ pass_stack(value); +++ } +++ } +++ +++ virtual void pass_long() { +++ intptr_t value = *double_slot_addr(); +++ if (pass_gpr(value) < 0) { +++ pass_stack(value); +++ } +++ } +++ +++ virtual void pass_object() { +++ intptr_t* addr = single_slot_addr(); +++ intptr_t value = *addr == 0 ? NULL : (intptr_t)addr; +++ if (pass_gpr(value) < 0) { +++ pass_stack(value); +++ } +++ } +++ +++ virtual void pass_float() { +++ jint value = *(jint*) single_slot_addr(); +++ // a floating-point argument is passed according to the integer calling +++ // convention if no floating-point argument register available +++ if (pass_fpr(value) < 0 && pass_gpr(value) < 0) { +++ pass_stack(value); +++ } +++ } +++ +++ virtual void pass_double() { +++ intptr_t value = *double_slot_addr(); +++ int arg = pass_fpr(value); +++ if (0 <= arg) { +++ *_fp_identifiers |= (1ull << arg); // mark as double +++ } else if (pass_gpr(value) < 0) { // no need to mark if passing by integer registers or stack +++ pass_stack(value); +++ } +++ } +++ +++ public: +++ SlowSignatureHandler(const methodHandle& method, address from, intptr_t* to) +++ : NativeSignatureIterator(method) +++ { +++ _from = from; +++ _to = to; +++ +++ _int_args = to - (method->is_static() ? 16 : 17); +++ _fp_args = to - 8; +++ _fp_identifiers = to - 9; +++ *(int*) _fp_identifiers = 0; +++ _num_reg_int_args = (method->is_static() ? 
1 : 0); +++ _num_reg_fp_args = 0; +++ } +++ +++ ~SlowSignatureHandler() +++ { +++ _from = NULL; +++ _to = NULL; +++ _int_args = NULL; +++ _fp_args = NULL; +++ _fp_identifiers = NULL; +++ } +++}; +++ +++ +++JRT_ENTRY(address, +++ InterpreterRuntime::slow_signature_handler(JavaThread* current, +++ Method* method, +++ intptr_t* from, +++ intptr_t* to)) +++ methodHandle m(current, (Method*)method); +++ assert(m->is_native(), "sanity check"); +++ +++ // handle arguments +++ SlowSignatureHandler ssh(m, (address)from, to); +++ ssh.iterate(UCONST64(-1)); +++ +++ // return result handler +++ return Interpreter::result_handler(m->result_type()); +++JRT_END ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/interpreterRT_riscv.hpp ++@@ -0,0 +1,68 @@ +++/* +++ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_INTERPRETERRT_RISCV_HPP +++#define CPU_RISCV_INTERPRETERRT_RISCV_HPP +++ +++// This is included in the middle of class Interpreter. +++// Do not include files here. +++ +++// native method calls +++ +++class SignatureHandlerGenerator: public NativeSignatureIterator { +++ private: +++ MacroAssembler* _masm; +++ unsigned int _num_reg_fp_args; +++ unsigned int _num_reg_int_args; +++ int _stack_offset; +++ +++ void pass_int(); +++ void pass_long(); +++ void pass_float(); +++ void pass_double(); +++ void pass_object(); +++ +++ Register next_gpr(); +++ FloatRegister next_fpr(); +++ int next_stack_offset(); +++ +++ public: +++ // Creation +++ SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer); +++ virtual ~SignatureHandlerGenerator() { +++ _masm = NULL; +++ } +++ +++ // Code generation +++ void generate(uint64_t fingerprint); +++ +++ // Code generation support +++ static Register from(); +++ static Register to(); +++ static Register temp(); +++}; +++ +++#endif // CPU_RISCV_INTERPRETERRT_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/javaFrameAnchor_riscv.hpp ++@@ -0,0 +1,86 @@ +++/* +++ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_JAVAFRAMEANCHOR_RISCV_HPP +++#define CPU_RISCV_JAVAFRAMEANCHOR_RISCV_HPP +++ +++private: +++ +++ // FP value associated with _last_Java_sp: +++ intptr_t* volatile _last_Java_fp; // pointer is volatile not what it points to +++ +++public: +++ // Each arch must define reset, save, restore +++ // These are used by objects that only care about: +++ // 1 - initializing a new state (thread creation, javaCalls) +++ // 2 - saving a current state (javaCalls) +++ // 3 - restoring an old state (javaCalls) +++ +++ void clear(void) { +++ // clearing _last_Java_sp must be first +++ _last_Java_sp = NULL; +++ OrderAccess::release(); +++ _last_Java_fp = NULL; +++ _last_Java_pc = NULL; +++ } +++ +++ void copy(JavaFrameAnchor* src) { +++ // In order to make sure the transition state is valid for "this" +++ // We must clear _last_Java_sp before copying the rest of the new data +++ // +++ // Hack Alert: Temporary bugfix for 4717480/4721647 +++ // To act like previous version (pd_cache_state) don't NULL _last_Java_sp +++ // unless the value is changing +++ // +++ assert(src != NULL, "Src should not be NULL."); +++ if (_last_Java_sp != src->_last_Java_sp) { +++ _last_Java_sp = NULL; +++ OrderAccess::release(); +++ } +++ _last_Java_fp = src->_last_Java_fp; +++ _last_Java_pc = src->_last_Java_pc; +++ // Must be last so profiler will always see valid frame if has_last_frame() is true +++ _last_Java_sp = src->_last_Java_sp; +++ } +++ +++ bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; } +++ +++ void make_walkable(); +++ +++ intptr_t* last_Java_sp(void) const { return _last_Java_sp; } +++ +++ const address last_Java_pc(void) { return _last_Java_pc; } +++ +++private: +++ +++ static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); } +++ +++public: +++ +++ void set_last_Java_sp(intptr_t* java_sp) { _last_Java_sp = java_sp; OrderAccess::release(); } +++ +++ intptr_t* last_Java_fp(void) { return _last_Java_fp; } +++ +++#endif // CPU_RISCV_JAVAFRAMEANCHOR_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/jniFastGetField_riscv.cpp ++@@ -0,0 +1,221 @@ +++/* +++ * Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.hpp" +++#include "gc/shared/barrierSet.hpp" +++#include "gc/shared/barrierSetAssembler.hpp" +++#include "memory/resourceArea.hpp" +++#include "prims/jniFastGetField.hpp" +++#include "prims/jvm_misc.hpp" +++#include "prims/jvmtiExport.hpp" +++#include "runtime/safepoint.hpp" +++ +++#define __ masm-> +++ +++#define BUFFER_SIZE 30*wordSize +++ +++// Instead of issuing a LoadLoad barrier we create an address +++// dependency between loads; this might be more efficient. +++ +++// Common register usage: +++// x10/f10: result +++// c_rarg0: jni env +++// c_rarg1: obj +++// c_rarg2: jfield id +++ +++static const Register robj = x13; +++static const Register rcounter = x14; +++static const Register roffset = x15; +++static const Register rcounter_addr = x16; +++static const Register result = x17; +++ +++address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { +++ const char *name; +++ switch (type) { +++ case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break; +++ case T_BYTE: name = "jni_fast_GetByteField"; break; +++ case T_CHAR: name = "jni_fast_GetCharField"; break; +++ case T_SHORT: name = "jni_fast_GetShortField"; break; +++ case T_INT: name = "jni_fast_GetIntField"; break; +++ case T_LONG: name = "jni_fast_GetLongField"; break; +++ case T_FLOAT: name = "jni_fast_GetFloatField"; break; +++ case T_DOUBLE: name = "jni_fast_GetDoubleField"; break; +++ default: ShouldNotReachHere(); +++ name = NULL; // unreachable +++ } +++ ResourceMark rm; +++ BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE); +++ CodeBuffer cbuf(blob); +++ MacroAssembler* masm = new MacroAssembler(&cbuf); +++ address fast_entry = __ pc(); +++ +++ Address target(SafepointSynchronize::safepoint_counter_addr()); +++ __ relocate(target.rspec(), [&] { +++ int32_t offset; +++ __ la_patchable(rcounter_addr, target, offset); +++ __ addi(rcounter_addr, rcounter_addr, offset); +++ }); +++ +++ Label slow; +++ Address safepoint_counter_addr(rcounter_addr, 0); +++ __ lwu(rcounter, safepoint_counter_addr); +++ // An even value means there are no ongoing safepoint operations +++ __ test_bit(t0, rcounter, 0); +++ __ bnez(t0, slow); +++ +++ if (JvmtiExport::can_post_field_access()) { +++ // Using barrier to order wrt. JVMTI check and load of result. +++ __ membar(MacroAssembler::LoadLoad); +++ +++ // Check to see if a field access watch has been set before we +++ // take the fast path. 
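+++ // The watch count lives at a fixed address in the VM; any non-zero
+++ // value routes the access through the slow path so that JVMTI
+++ // field-access events get posted. In effect:
+++ //
+++ //   if (*JvmtiExport::get_field_access_count_addr() != 0) goto slow;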
+++ ExternalAddress target((address) JvmtiExport::get_field_access_count_addr()); +++ __ relocate(target.rspec(), [&] { +++ int32_t offset; +++ __ la_patchable(result, target, offset); +++ __ lwu(result, Address(result, offset)); +++ }); +++ __ bnez(result, slow); +++ +++ __ mv(robj, c_rarg1); +++ } else { +++ // Using address dependency to order wrt. load of result. +++ __ xorr(robj, c_rarg1, rcounter); +++ __ xorr(robj, robj, rcounter); // obj, since +++ // robj ^ rcounter ^ rcounter == robj +++ // robj is address dependent on rcounter. +++ } +++ +++ // Both robj and t0 are clobbered by try_resolve_jobject_in_native. +++ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); +++ assert_cond(bs != NULL); +++ bs->try_resolve_jobject_in_native(masm, c_rarg0, robj, t0, slow); +++ +++ __ srli(roffset, c_rarg2, 2); // offset +++ +++ assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); +++ speculative_load_pclist[count] = __ pc(); // Used by the segfault handler +++ __ add(roffset, robj, roffset); +++ +++ switch (type) { +++ case T_BOOLEAN: __ lbu(result, Address(roffset, 0)); break; +++ case T_BYTE: __ lb(result, Address(roffset, 0)); break; +++ case T_CHAR: __ lhu(result, Address(roffset, 0)); break; +++ case T_SHORT: __ lh(result, Address(roffset, 0)); break; +++ case T_INT: __ lw(result, Address(roffset, 0)); break; +++ case T_LONG: __ ld(result, Address(roffset, 0)); break; +++ case T_FLOAT: { +++ __ flw(f28, Address(roffset, 0)); // f28 as temporaries +++ __ fmv_x_w(result, f28); // f{31--0}-->x +++ break; +++ } +++ case T_DOUBLE: { +++ __ fld(f28, Address(roffset, 0)); // f28 as temporaries +++ __ fmv_x_d(result, f28); // d{63--0}-->x +++ break; +++ } +++ default: ShouldNotReachHere(); +++ } +++ +++ // Using acquire: Order JVMTI check and load of result wrt. succeeding check +++ // (LoadStore for volatile field). 
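+++ // In effect this is the reader side of a seqlock: the field was read
+++ // optimistically between two reads of the safepoint counter, and the
+++ // result only stands if the counter is even and unchanged:
+++ //
+++ //   c = *counter;  if (c & 1) goto slow;   // done above
+++ //   result = *(obj + offset);              // speculative load above
+++ //   if (*counter != c) goto slow;          // re-check below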
+++ __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); +++ +++ __ lw(t0, safepoint_counter_addr); +++ __ bne(rcounter, t0, slow); +++ +++ switch (type) { +++ case T_FLOAT: __ fmv_w_x(f10, result); break; +++ case T_DOUBLE: __ fmv_d_x(f10, result); break; +++ default: __ mv(x10, result); break; +++ } +++ __ ret(); +++ +++ slowcase_entry_pclist[count++] = __ pc(); +++ __ bind(slow); +++ address slow_case_addr; +++ switch (type) { +++ case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break; +++ case T_BYTE: slow_case_addr = jni_GetByteField_addr(); break; +++ case T_CHAR: slow_case_addr = jni_GetCharField_addr(); break; +++ case T_SHORT: slow_case_addr = jni_GetShortField_addr(); break; +++ case T_INT: slow_case_addr = jni_GetIntField_addr(); break; +++ case T_LONG: slow_case_addr = jni_GetLongField_addr(); break; +++ case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break; +++ case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break; +++ default: ShouldNotReachHere(); +++ slow_case_addr = NULL; // unreachable +++ } +++ +++ { +++ __ enter(); +++ ExternalAddress target(slow_case_addr); +++ __ relocate(target.rspec(), [&] { +++ int32_t offset; +++ __ la_patchable(t0, target, offset); +++ __ jalr(x1, t0, offset); +++ }); +++ __ leave(); +++ __ ret(); +++ } +++ __ flush(); +++ +++ return fast_entry; +++} +++ +++ +++address JNI_FastGetField::generate_fast_get_boolean_field() { +++ return generate_fast_get_int_field0(T_BOOLEAN); +++} +++ +++address JNI_FastGetField::generate_fast_get_byte_field() { +++ return generate_fast_get_int_field0(T_BYTE); +++} +++ +++address JNI_FastGetField::generate_fast_get_char_field() { +++ return generate_fast_get_int_field0(T_CHAR); +++} +++ +++address JNI_FastGetField::generate_fast_get_short_field() { +++ return generate_fast_get_int_field0(T_SHORT); +++} +++ +++address JNI_FastGetField::generate_fast_get_int_field() { +++ return generate_fast_get_int_field0(T_INT); +++} +++ +++address JNI_FastGetField::generate_fast_get_long_field() { +++ return generate_fast_get_int_field0(T_LONG); +++} +++ +++address JNI_FastGetField::generate_fast_get_float_field() { +++ return generate_fast_get_int_field0(T_FLOAT); +++} +++ +++address JNI_FastGetField::generate_fast_get_double_field() { +++ return generate_fast_get_int_field0(T_DOUBLE); +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/jniTypes_riscv.hpp ++@@ -0,0 +1,106 @@ +++/* +++ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_JNITYPES_RISCV_HPP +++#define CPU_RISCV_JNITYPES_RISCV_HPP +++ +++#include "jni.h" +++#include "memory/allStatic.hpp" +++#include "oops/oop.hpp" +++ +++// This file holds platform-dependent routines used to write primitive jni +++// types to the array of arguments passed into JavaCalls::call +++ +++class JNITypes : private AllStatic { +++ // These functions write a java primitive type (in native format) +++ // to a java stack slot array to be passed as an argument to JavaCalls:calls. +++ // I.e., they are functionally 'push' operations if they have a 'pos' +++ // formal parameter. Note that jlong's and jdouble's are written +++ // _in reverse_ of the order in which they appear in the interpreter +++ // stack. This is because call stubs (see stubGenerator_sparc.cpp) +++ // reverse the argument list constructed by JavaCallArguments (see +++ // javaCalls.hpp). +++ +++public: +++ // Ints are stored in native format in one JavaCallArgument slot at *to. +++ static inline void put_int(jint from, intptr_t *to) { *(jint *)(to + 0 ) = from; } +++ static inline void put_int(jint from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = from; } +++ static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; } +++ +++ // Longs are stored in native format in one JavaCallArgument slot at +++ // *(to+1). +++ static inline void put_long(jlong from, intptr_t *to) { +++ *(jlong*) (to + 1) = from; +++ } +++ +++ static inline void put_long(jlong from, intptr_t *to, int& pos) { +++ *(jlong*) (to + 1 + pos) = from; +++ pos += 2; +++ } +++ +++ static inline void put_long(jlong *from, intptr_t *to, int& pos) { +++ *(jlong*) (to + 1 + pos) = *from; +++ pos += 2; +++ } +++ +++ // Oops are stored in native format in one JavaCallArgument slot at *to. +++ static inline void put_obj(const Handle& from_handle, intptr_t *to, int& pos) { *(to + pos++) = (intptr_t)from_handle.raw_value(); } +++ static inline void put_obj(jobject from_handle, intptr_t *to, int& pos) { *(to + pos++) = (intptr_t)from_handle; } +++ +++ // Floats are stored in native format in one JavaCallArgument slot at *to. +++ static inline void put_float(jfloat from, intptr_t *to) { *(jfloat *)(to + 0 ) = from; } +++ static inline void put_float(jfloat from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = from; } +++ static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; } +++ +++#undef _JNI_SLOT_OFFSET +++#define _JNI_SLOT_OFFSET 1 +++ // Doubles are stored in native word format in one JavaCallArgument +++ // slot at *(to+1). +++ static inline void put_double(jdouble from, intptr_t *to) { +++ *(jdouble*) (to + 1) = from; +++ } +++ +++ static inline void put_double(jdouble from, intptr_t *to, int& pos) { +++ *(jdouble*) (to + 1 + pos) = from; +++ pos += 2; +++ } +++ +++ static inline void put_double(jdouble *from, intptr_t *to, int& pos) { +++ *(jdouble*) (to + 1 + pos) = *from; +++ pos += 2; +++ } +++ +++ // The get_xxx routines, on the other hand, actually _do_ fetch +++ // java primitive types from the interpreter stack. +++ // No need to worry about alignment on Intel. 
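+++ // Worked example of the slot layout (illustrative): for a call with
+++ // signature (jint, jlong) the writer produces
+++ //
+++ //   int pos = 0;
+++ //   JNITypes::put_int(i, args, pos);   // args[0] = i;  pos == 1
+++ //   JNITypes::put_long(l, args, pos);  // args[2] = l;  pos == 3
+++ //
+++ // so get_long(&args[1]) reads args[1 + _JNI_SLOT_OFFSET] == args[2].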
+++ static inline jint get_int (intptr_t *from) { return *(jint *) from; } +++ static inline jlong get_long (intptr_t *from) { return *(jlong *) (from + _JNI_SLOT_OFFSET); } +++ static inline oop get_obj (intptr_t *from) { return *(oop *) from; } +++ static inline jfloat get_float (intptr_t *from) { return *(jfloat *) from; } +++ static inline jdouble get_double(intptr_t *from) { return *(jdouble *)(from + _JNI_SLOT_OFFSET); } +++#undef _JNI_SLOT_OFFSET +++}; +++ +++#endif // CPU_RISCV_JNITYPES_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp ++@@ -0,0 +1,4392 @@ +++/* +++ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/assembler.hpp" +++#include "asm/assembler.inline.hpp" +++#include "compiler/disassembler.hpp" +++#include "gc/shared/barrierSet.hpp" +++#include "gc/shared/barrierSetAssembler.hpp" +++#include "gc/shared/cardTable.hpp" +++#include "gc/shared/cardTableBarrierSet.hpp" +++#include "gc/shared/collectedHeap.hpp" +++#include "interpreter/bytecodeHistogram.hpp" +++#include "interpreter/interpreter.hpp" +++#include "memory/resourceArea.hpp" +++#include "memory/universe.hpp" +++#include "nativeInst_riscv.hpp" +++#include "oops/accessDecorators.hpp" +++#include "oops/compressedOops.inline.hpp" +++#include "oops/klass.inline.hpp" +++#include "oops/oop.hpp" +++#include "runtime/biasedLocking.hpp" +++#include "runtime/interfaceSupport.inline.hpp" +++#include "runtime/jniHandles.inline.hpp" +++#include "runtime/sharedRuntime.hpp" +++#include "runtime/stubRoutines.hpp" +++#include "runtime/thread.hpp" +++#include "utilities/powerOfTwo.hpp" +++#ifdef COMPILER2 +++#include "opto/compile.hpp" +++#include "opto/node.hpp" +++#include "opto/output.hpp" +++#endif +++ +++#ifdef PRODUCT +++#define BLOCK_COMMENT(str) /* nothing */ +++#else +++#define BLOCK_COMMENT(str) block_comment(str) +++#endif +++#define BIND(label) bind(label); __ BLOCK_COMMENT(#label ":") +++ +++static void pass_arg0(MacroAssembler* masm, Register arg) { +++ if (c_rarg0 != arg) { +++ masm->mv(c_rarg0, arg); +++ } +++} +++ +++static void pass_arg1(MacroAssembler* masm, Register arg) { +++ if (c_rarg1 != arg) { +++ masm->mv(c_rarg1, arg); +++ } +++} +++ +++static void pass_arg2(MacroAssembler* masm, Register arg) { +++ if (c_rarg2 != arg) { +++ masm->mv(c_rarg2, arg); +++ } +++} +++ +++static void pass_arg3(MacroAssembler* masm, Register arg) { +++ if (c_rarg3 != arg) { +++ masm->mv(c_rarg3, arg); +++ } +++} +++ +++void MacroAssembler::align(int modulus, int extra_offset) { +++ CompressibleRegion cr(this); +++ while ((offset() + extra_offset) % modulus != 0) { nop(); } +++} +++ +++void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) { +++ call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions); +++} +++ +++// Implementation of call_VM versions +++ +++void MacroAssembler::call_VM(Register oop_result, +++ address entry_point, +++ bool check_exceptions) { +++ call_VM_helper(oop_result, entry_point, 0, check_exceptions); +++} +++ +++void MacroAssembler::call_VM(Register oop_result, +++ address entry_point, +++ Register arg_1, +++ bool check_exceptions) { +++ pass_arg1(this, arg_1); +++ call_VM_helper(oop_result, entry_point, 1, check_exceptions); +++} +++ +++void MacroAssembler::call_VM(Register oop_result, +++ address entry_point, +++ Register arg_1, +++ Register arg_2, +++ bool check_exceptions) { +++ assert(arg_1 != c_rarg2, "smashed arg"); +++ pass_arg2(this, arg_2); +++ pass_arg1(this, arg_1); +++ call_VM_helper(oop_result, entry_point, 2, check_exceptions); +++} +++ +++void MacroAssembler::call_VM(Register oop_result, +++ address entry_point, +++ Register arg_1, +++ Register arg_2, +++ Register arg_3, +++ bool check_exceptions) { +++ assert(arg_1 != c_rarg3, "smashed arg"); +++ assert(arg_2 != c_rarg3, "smashed arg"); +++ pass_arg3(this, arg_3); +++ +++ assert(arg_1 != c_rarg2, "smashed arg"); +++ pass_arg2(this, arg_2); +++ +++ pass_arg1(this, arg_1); +++ call_VM_helper(oop_result, entry_point, 3, check_exceptions); +++} +++ +++void MacroAssembler::call_VM(Register 
oop_result, +++ Register last_java_sp, +++ address entry_point, +++ int number_of_arguments, +++ bool check_exceptions) { +++ call_VM_base(oop_result, xthread, last_java_sp, entry_point, number_of_arguments, check_exceptions); +++} +++ +++void MacroAssembler::call_VM(Register oop_result, +++ Register last_java_sp, +++ address entry_point, +++ Register arg_1, +++ bool check_exceptions) { +++ pass_arg1(this, arg_1); +++ call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); +++} +++ +++void MacroAssembler::call_VM(Register oop_result, +++ Register last_java_sp, +++ address entry_point, +++ Register arg_1, +++ Register arg_2, +++ bool check_exceptions) { +++ +++ assert(arg_1 != c_rarg2, "smashed arg"); +++ pass_arg2(this, arg_2); +++ pass_arg1(this, arg_1); +++ call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); +++} +++ +++void MacroAssembler::call_VM(Register oop_result, +++ Register last_java_sp, +++ address entry_point, +++ Register arg_1, +++ Register arg_2, +++ Register arg_3, +++ bool check_exceptions) { +++ assert(arg_1 != c_rarg3, "smashed arg"); +++ assert(arg_2 != c_rarg3, "smashed arg"); +++ pass_arg3(this, arg_3); +++ assert(arg_1 != c_rarg2, "smashed arg"); +++ pass_arg2(this, arg_2); +++ pass_arg1(this, arg_1); +++ call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); +++} +++ +++// these are no-ops overridden by InterpreterMacroAssembler +++void MacroAssembler::check_and_handle_earlyret(Register java_thread) {} +++void MacroAssembler::check_and_handle_popframe(Register java_thread) {} +++ +++// Calls to C land +++// +++// When entering C land, the fp, & esp of the last Java frame have to be recorded +++// in the (thread-local) JavaThread object. When leaving C land, the last Java fp +++// has to be reset to 0. This is required to allow proper stack traversal. +++void MacroAssembler::set_last_Java_frame(Register last_java_sp, +++ Register last_java_fp, +++ Register last_java_pc, +++ Register tmp) { +++ +++ if (last_java_pc->is_valid()) { +++ sd(last_java_pc, Address(xthread, +++ JavaThread::frame_anchor_offset() + +++ JavaFrameAnchor::last_Java_pc_offset())); +++ } +++ +++ // determine last_java_sp register +++ if (last_java_sp == sp) { +++ mv(tmp, sp); +++ last_java_sp = tmp; +++ } else if (!last_java_sp->is_valid()) { +++ last_java_sp = esp; +++ } +++ +++ sd(last_java_sp, Address(xthread, JavaThread::last_Java_sp_offset())); +++ +++ // last_java_fp is optional +++ if (last_java_fp->is_valid()) { +++ sd(last_java_fp, Address(xthread, JavaThread::last_Java_fp_offset())); +++ } +++} +++ +++void MacroAssembler::set_last_Java_frame(Register last_java_sp, +++ Register last_java_fp, +++ address last_java_pc, +++ Register tmp) { +++ assert(last_java_pc != NULL, "must provide a valid PC"); +++ +++ la(tmp, last_java_pc); +++ sd(tmp, Address(xthread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset())); +++ +++ set_last_Java_frame(last_java_sp, last_java_fp, noreg, tmp); +++} +++ +++void MacroAssembler::set_last_Java_frame(Register last_java_sp, +++ Register last_java_fp, +++ Label &L, +++ Register tmp) { +++ if (L.is_bound()) { +++ set_last_Java_frame(last_java_sp, last_java_fp, target(L), tmp); +++ } else { +++ L.add_patch_at(code(), locator()); +++ IncompressibleRegion ir(this); // the label address will be patched back. 
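+++ // The label is not bound yet: record the current pc as a placeholder
+++ // and let the pending patch entry rewrite it once the label is bound.
+++ // Compression is disabled above so the patched sequence keeps its
+++ // original length.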
+++ set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, tmp); +++ } +++} +++ +++void MacroAssembler::reset_last_Java_frame(bool clear_fp) { +++ // we must set sp to zero to clear frame +++ sd(zr, Address(xthread, JavaThread::last_Java_sp_offset())); +++ +++ // must clear fp, so that compiled frames are not confused; it is +++ // possible that we need it only for debugging +++ if (clear_fp) { +++ sd(zr, Address(xthread, JavaThread::last_Java_fp_offset())); +++ } +++ +++ // Always clear the pc because it could have been set by make_walkable() +++ sd(zr, Address(xthread, JavaThread::last_Java_pc_offset())); +++} +++ +++void MacroAssembler::call_VM_base(Register oop_result, +++ Register java_thread, +++ Register last_java_sp, +++ address entry_point, +++ int number_of_arguments, +++ bool check_exceptions) { +++ // determine java_thread register +++ if (!java_thread->is_valid()) { +++ java_thread = xthread; +++ } +++ // determine last_java_sp register +++ if (!last_java_sp->is_valid()) { +++ last_java_sp = esp; +++ } +++ +++ // debugging support +++ assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); +++ assert(java_thread == xthread, "unexpected register"); +++ +++ assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); +++ assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp"); +++ +++ // push java thread (becomes first argument of C function) +++ mv(c_rarg0, java_thread); +++ +++ // set last Java frame before call +++ assert(last_java_sp != fp, "can't use fp"); +++ +++ Label l; +++ set_last_Java_frame(last_java_sp, fp, l, t0); +++ +++ // do the call, remove parameters +++ MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l); +++ +++ // reset last Java frame +++ // Only interpreter should have to clear fp +++ reset_last_Java_frame(true); +++ +++ // C++ interp handles this in the interpreter +++ check_and_handle_popframe(java_thread); +++ check_and_handle_earlyret(java_thread); +++ +++ if (check_exceptions) { +++ // check for pending exceptions (java_thread is set upon return) +++ ld(t0, Address(java_thread, in_bytes(Thread::pending_exception_offset()))); +++ Label ok; +++ beqz(t0, ok); +++ RuntimeAddress target(StubRoutines::forward_exception_entry()); +++ relocate(target.rspec(), [&] { +++ int32_t offset; +++ la_patchable(t0, target, offset); +++ jalr(x0, t0, offset); +++ }); +++ bind(ok); +++ } +++ +++ // get oop result if there is one and reset the value in the thread +++ if (oop_result->is_valid()) { +++ get_vm_result(oop_result, java_thread); +++ } +++} +++ +++void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) { +++ ld(oop_result, Address(java_thread, JavaThread::vm_result_offset())); +++ sd(zr, Address(java_thread, JavaThread::vm_result_offset())); +++ verify_oop(oop_result, "broken oop in call_VM_base"); +++} +++ +++void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) { +++ ld(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset())); +++ sd(zr, Address(java_thread, JavaThread::vm_result_2_offset())); +++} +++ +++void MacroAssembler::clinit_barrier(Register klass, Register tmp, Label* L_fast_path, Label* L_slow_path) { +++ assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required"); +++ assert_different_registers(klass, xthread, tmp); +++ +++ Label L_fallthrough, L_tmp; +++ if (L_fast_path == NULL) { +++ L_fast_path = &L_fallthrough; +++ } else 
if (L_slow_path == NULL) { +++ L_slow_path = &L_fallthrough; +++ } +++ +++ // Fast path check: class is fully initialized +++ lbu(tmp, Address(klass, InstanceKlass::init_state_offset())); +++ sub(tmp, tmp, InstanceKlass::fully_initialized); +++ beqz(tmp, *L_fast_path); +++ +++ // Fast path check: current thread is initializer thread +++ ld(tmp, Address(klass, InstanceKlass::init_thread_offset())); +++ +++ if (L_slow_path == &L_fallthrough) { +++ beq(xthread, tmp, *L_fast_path); +++ bind(*L_slow_path); +++ } else if (L_fast_path == &L_fallthrough) { +++ bne(xthread, tmp, *L_slow_path); +++ bind(*L_fast_path); +++ } else { +++ Unimplemented(); +++ } +++} +++ +++void MacroAssembler::verify_oop(Register reg, const char* s) { +++ if (!VerifyOops) { return; } +++ +++ // Pass register number to verify_oop_subroutine +++ const char* b = NULL; +++ { +++ ResourceMark rm; +++ stringStream ss; +++ ss.print("verify_oop: %s: %s", reg->name(), s); +++ b = code_string(ss.as_string()); +++ } +++ BLOCK_COMMENT("verify_oop {"); +++ +++ push_reg(RegSet::of(ra, t0, t1, c_rarg0), sp); +++ +++ mv(c_rarg0, reg); // c_rarg0 : x10 +++ { +++ // The length of the instruction sequence emitted should not depend +++ // on the address of the char buffer so that the size of mach nodes for +++ // scratch emit and normal emit matches. +++ IncompressibleRegion ir(this); // Fixed length +++ movptr(t0, (address) b); +++ } +++ +++ // call indirectly to solve generation ordering problem +++ ExternalAddress target(StubRoutines::verify_oop_subroutine_entry_address()); +++ relocate(target.rspec(), [&] { +++ int32_t offset; +++ la_patchable(t1, target, offset); +++ ld(t1, Address(t1, offset)); +++ }); +++ jalr(t1); +++ +++ pop_reg(RegSet::of(ra, t0, t1, c_rarg0), sp); +++ +++ BLOCK_COMMENT("} verify_oop"); +++} +++ +++void MacroAssembler::verify_oop_addr(Address addr, const char* s) { +++ if (!VerifyOops) { +++ return; +++ } +++ +++ const char* b = NULL; +++ { +++ ResourceMark rm; +++ stringStream ss; +++ ss.print("verify_oop_addr: %s", s); +++ b = code_string(ss.as_string()); +++ } +++ BLOCK_COMMENT("verify_oop_addr {"); +++ +++ push_reg(RegSet::of(ra, t0, t1, c_rarg0), sp); +++ +++ if (addr.uses(sp)) { +++ la(x10, addr); +++ ld(x10, Address(x10, 4 * wordSize)); +++ } else { +++ ld(x10, addr); +++ } +++ +++ { +++ // The length of the instruction sequence emitted should not depend +++ // on the address of the char buffer so that the size of mach nodes for +++ // scratch emit and normal emit matches. +++ IncompressibleRegion ir(this); // Fixed length +++ movptr(t0, (address) b); +++ } +++ +++ // call indirectly to solve generation ordering problem +++ ExternalAddress target(StubRoutines::verify_oop_subroutine_entry_address()); +++ relocate(target.rspec(), [&] { +++ int32_t offset; +++ la_patchable(t1, target, offset); +++ ld(t1, Address(t1, offset)); +++ }); +++ jalr(t1); +++ +++ pop_reg(RegSet::of(ra, t0, t1, c_rarg0), sp); +++ +++ BLOCK_COMMENT("} verify_oop_addr"); +++} +++ +++Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, +++ int extra_slot_offset) { +++ // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
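+++ // Expression-stack slots are stackElementSize bytes apart, slot 0
+++ // nearest to esp. A constant slot folds into the displacement; a
+++ // register slot is scaled into t0 first. Either way the address is
+++ //
+++ //   esp + arg_slot * stackElementSize + expr_offset_in_bytes(extra_slot_offset)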
+++ int stackElementSize = Interpreter::stackElementSize;
+++ int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
+++#ifdef ASSERT
+++ int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
+++ assert(offset1 - offset == stackElementSize, "correct arithmetic");
+++#endif
+++ if (arg_slot.is_constant()) {
+++ return Address(esp, arg_slot.as_constant() * stackElementSize + offset);
+++ } else {
+++ assert_different_registers(t0, arg_slot.as_register());
+++ shadd(t0, arg_slot.as_register(), esp, t0, exact_log2(stackElementSize));
+++ return Address(t0, offset);
+++ }
+++}
+++
+++#ifndef PRODUCT
+++extern "C" void findpc(intptr_t x);
+++#endif
+++
+++void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
+++{
+++ // In order to get locks to work, we need to fake an in_VM state
+++ if (ShowMessageBoxOnError) {
+++ JavaThread* thread = JavaThread::current();
+++ JavaThreadState saved_state = thread->thread_state();
+++ thread->set_thread_state(_thread_in_vm);
+++#ifndef PRODUCT
+++ if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
+++ ttyLocker ttyl;
+++ BytecodeCounter::print();
+++ }
+++#endif
+++ if (os::message_box(msg, "Execution stopped, print registers?")) {
+++ ttyLocker ttyl;
+++ tty->print_cr(" pc = 0x%016lx", pc);
+++#ifndef PRODUCT
+++ tty->cr();
+++ findpc(pc);
+++ tty->cr();
+++#endif
+++ tty->print_cr(" x0 = 0x%016lx", regs[0]);
+++ tty->print_cr(" x1 = 0x%016lx", regs[1]);
+++ tty->print_cr(" x2 = 0x%016lx", regs[2]);
+++ tty->print_cr(" x3 = 0x%016lx", regs[3]);
+++ tty->print_cr(" x4 = 0x%016lx", regs[4]);
+++ tty->print_cr(" x5 = 0x%016lx", regs[5]);
+++ tty->print_cr(" x6 = 0x%016lx", regs[6]);
+++ tty->print_cr(" x7 = 0x%016lx", regs[7]);
+++ tty->print_cr(" x8 = 0x%016lx", regs[8]);
+++ tty->print_cr(" x9 = 0x%016lx", regs[9]);
+++ tty->print_cr("x10 = 0x%016lx", regs[10]);
+++ tty->print_cr("x11 = 0x%016lx", regs[11]);
+++ tty->print_cr("x12 = 0x%016lx", regs[12]);
+++ tty->print_cr("x13 = 0x%016lx", regs[13]);
+++ tty->print_cr("x14 = 0x%016lx", regs[14]);
+++ tty->print_cr("x15 = 0x%016lx", regs[15]);
+++ tty->print_cr("x16 = 0x%016lx", regs[16]);
+++ tty->print_cr("x17 = 0x%016lx", regs[17]);
+++ tty->print_cr("x18 = 0x%016lx", regs[18]);
+++ tty->print_cr("x19 = 0x%016lx", regs[19]);
+++ tty->print_cr("x20 = 0x%016lx", regs[20]);
+++ tty->print_cr("x21 = 0x%016lx", regs[21]);
+++ tty->print_cr("x22 = 0x%016lx", regs[22]);
+++ tty->print_cr("x23 = 0x%016lx", regs[23]);
+++ tty->print_cr("x24 = 0x%016lx", regs[24]);
+++ tty->print_cr("x25 = 0x%016lx", regs[25]);
+++ tty->print_cr("x26 = 0x%016lx", regs[26]);
+++ tty->print_cr("x27 = 0x%016lx", regs[27]);
+++ tty->print_cr("x28 = 0x%016lx", regs[28]);
+++ tty->print_cr("x29 = 0x%016lx", regs[29]);
+++ tty->print_cr("x30 = 0x%016lx", regs[30]);
+++ tty->print_cr("x31 = 0x%016lx", regs[31]);
+++ BREAKPOINT;
+++ }
+++ }
+++ fatal("DEBUG MESSAGE: %s", msg);
+++}
+++
+++void MacroAssembler::resolve_jobject(Register value, Register thread, Register tmp) {
+++ Label done, not_weak;
+++ beqz(value, done); // Use NULL as-is.
+++
+++ // Test for jweak tag.
+++ test_bit(t0, value, exact_log2(JNIHandles::weak_tag_mask));
+++ beqz(t0, not_weak);
+++
+++ // Resolve jweak.
+++ access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, value,
+++ Address(value, -JNIHandles::weak_tag_value), tmp, thread);
+++ verify_oop(value);
+++ j(done);
+++
+++ bind(not_weak);
+++ // Resolve (untagged) jobject.
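+++ // A strong handle has the jweak tag bit clear, so the oop can be
+++ // loaded directly from the (untagged) handle address:
+++ //
+++ //   value = *(oop*)value;   // no offset adjustment needed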
+++ access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, 0), tmp, thread); +++ verify_oop(value); +++ bind(done); +++} +++ +++void MacroAssembler::stop(const char* msg) { +++ BLOCK_COMMENT(msg); +++ illegal_instruction(Assembler::csr::time); +++ emit_int64((uintptr_t)msg); +++} +++ +++void MacroAssembler::unimplemented(const char* what) { +++ const char* buf = NULL; +++ { +++ ResourceMark rm; +++ stringStream ss; +++ ss.print("unimplemented: %s", what); +++ buf = code_string(ss.as_string()); +++ } +++ stop(buf); +++} +++ +++void MacroAssembler::emit_static_call_stub() { +++ IncompressibleRegion ir(this); // Fixed length: see CompiledStaticCall::to_interp_stub_size(). +++ // CompiledDirectStaticCall::set_to_interpreted knows the +++ // exact layout of this stub. +++ +++ mov_metadata(xmethod, (Metadata*)NULL); +++ +++ // Jump to the entry point of the i2c stub. +++ int32_t offset = 0; +++ movptr(t0, 0, offset); +++ jalr(x0, t0, offset); +++} +++ +++void MacroAssembler::call_VM_leaf_base(address entry_point, +++ int number_of_arguments, +++ Label *retaddr) { +++ push_reg(RegSet::of(t0, xmethod), sp); // push << t0 & xmethod >> to sp +++ call(entry_point); +++ if (retaddr != NULL) { +++ bind(*retaddr); +++ } +++ pop_reg(RegSet::of(t0, xmethod), sp); // pop << t0 & xmethod >> from sp +++} +++ +++void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { +++ call_VM_leaf_base(entry_point, number_of_arguments); +++} +++ +++void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { +++ pass_arg0(this, arg_0); +++ call_VM_leaf_base(entry_point, 1); +++} +++ +++void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { +++ pass_arg0(this, arg_0); +++ pass_arg1(this, arg_1); +++ call_VM_leaf_base(entry_point, 2); +++} +++ +++void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, +++ Register arg_1, Register arg_2) { +++ pass_arg0(this, arg_0); +++ pass_arg1(this, arg_1); +++ pass_arg2(this, arg_2); +++ call_VM_leaf_base(entry_point, 3); +++} +++ +++void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { +++ pass_arg0(this, arg_0); +++ MacroAssembler::call_VM_leaf_base(entry_point, 1); +++} +++ +++void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { +++ +++ assert(arg_0 != c_rarg1, "smashed arg"); +++ pass_arg1(this, arg_1); +++ pass_arg0(this, arg_0); +++ MacroAssembler::call_VM_leaf_base(entry_point, 2); +++} +++ +++void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { +++ assert(arg_0 != c_rarg2, "smashed arg"); +++ assert(arg_1 != c_rarg2, "smashed arg"); +++ pass_arg2(this, arg_2); +++ assert(arg_0 != c_rarg1, "smashed arg"); +++ pass_arg1(this, arg_1); +++ pass_arg0(this, arg_0); +++ MacroAssembler::call_VM_leaf_base(entry_point, 3); +++} +++ +++void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { +++ assert(arg_0 != c_rarg3, "smashed arg"); +++ assert(arg_1 != c_rarg3, "smashed arg"); +++ assert(arg_2 != c_rarg3, "smashed arg"); +++ pass_arg3(this, arg_3); +++ assert(arg_0 != c_rarg2, "smashed arg"); +++ assert(arg_1 != c_rarg2, "smashed arg"); +++ pass_arg2(this, arg_2); +++ assert(arg_0 != c_rarg1, "smashed arg"); +++ pass_arg1(this, arg_1); +++ pass_arg0(this, arg_0); +++ MacroAssembler::call_VM_leaf_base(entry_point, 4); +++} +++ +++void MacroAssembler::la(Register Rd, const address dest) { +++ int64_t offset = 
dest - pc(); +++ if (is_simm32(offset)) { +++ auipc(Rd, (int32_t)offset + 0x800); //0x800, Note:the 11th sign bit +++ addi(Rd, Rd, ((int64_t)offset << 52) >> 52); +++ } else { +++ movptr(Rd, dest); +++ } +++} +++ +++void MacroAssembler::la(Register Rd, const Address &adr) { +++ switch (adr.getMode()) { +++ case Address::literal: { +++ relocInfo::relocType rtype = adr.rspec().reloc()->type(); +++ if (rtype == relocInfo::none) { +++ mv(Rd, (intptr_t)(adr.target())); +++ } else { +++ relocate(adr.rspec(), [&] { +++ movptr(Rd, adr.target()); +++ }); +++ } +++ break; +++ } +++ case Address::base_plus_offset: { +++ Address new_adr = legitimize_address(Rd, adr); +++ if (!(new_adr.base() == Rd && new_adr.offset() == 0)) { +++ addi(Rd, new_adr.base(), new_adr.offset()); +++ } +++ break; +++ } +++ default: +++ ShouldNotReachHere(); +++ } +++} +++ +++void MacroAssembler::la(Register Rd, Label &label) { +++ IncompressibleRegion ir(this); // the label address may be patched back. +++ la(Rd, target(label)); +++} +++ +++void MacroAssembler::li32(Register Rd, int32_t imm) { +++ // int32_t is in range 0x8000 0000 ~ 0x7fff ffff, and imm[31] is the sign bit +++ int64_t upper = imm, lower = imm; +++ lower = (imm << 20) >> 20; +++ upper -= lower; +++ upper = (int32_t)upper; +++ // lui Rd, imm[31:12] + imm[11] +++ lui(Rd, upper); +++ // use addiw to distinguish li32 to li64 +++ addiw(Rd, Rd, lower); +++} +++ +++void MacroAssembler::li64(Register Rd, int64_t imm) { +++ // Load upper 32 bits. upper = imm[63:32], but if imm[31] == 1 or +++ // (imm[31:20] == 0x7ff && imm[19] == 1), upper = imm[63:32] + 1. +++ int64_t lower = imm & 0xffffffff; +++ lower -= ((lower << 44) >> 44); +++ int64_t tmp_imm = ((uint64_t)(imm & 0xffffffff00000000)) + (uint64_t)lower; +++ int32_t upper = (tmp_imm - (int32_t)lower) >> 32; +++ +++ // Load upper 32 bits +++ int64_t up = upper, lo = upper; +++ lo = (lo << 52) >> 52; +++ up -= lo; +++ up = (int32_t)up; +++ lui(Rd, up); +++ addi(Rd, Rd, lo); +++ +++ // Load the rest 32 bits. +++ slli(Rd, Rd, 12); +++ addi(Rd, Rd, (int32_t)lower >> 20); +++ slli(Rd, Rd, 12); +++ lower = ((int32_t)imm << 12) >> 20; +++ addi(Rd, Rd, lower); +++ slli(Rd, Rd, 8); +++ lower = imm & 0xff; +++ addi(Rd, Rd, lower); +++} +++ +++void MacroAssembler::li(Register Rd, int64_t imm) { +++ // int64_t is in range 0x8000 0000 0000 0000 ~ 0x7fff ffff ffff ffff +++ // li -> c.li +++ if (do_compress() && (is_simm6(imm) && Rd != x0)) { +++ c_li(Rd, imm); +++ return; +++ } +++ +++ int shift = 12; +++ int64_t upper = imm, lower = imm; +++ // Split imm to a lower 12-bit sign-extended part and the remainder, +++ // because addi will sign-extend the lower imm. +++ lower = ((int32_t)imm << 20) >> 20; +++ upper -= lower; +++ +++ // Test whether imm is a 32-bit integer. 
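+++ // A full 64-bit immediate is materialized recursively: split off the
+++ // sign-extended low 12 bits, shift the remainder down past its
+++ // trailing zeros, build it with a recursive li, then shift back up
+++ // and add the low part. A sketch:
+++ //
+++ //   lower = sign_extend_12(imm);  upper = imm - lower;
+++ //   li(Rd, upper >> shift);  slli(Rd, Rd, shift);
+++ //   if (lower != 0) addi(Rd, Rd, lower);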
+++ if (!(((imm) & ~(int64_t)0x7fffffff) == 0 || +++ (((imm) & ~(int64_t)0x7fffffff) == ~(int64_t)0x7fffffff))) { +++ while (((upper >> shift) & 1) == 0) { shift++; } +++ upper >>= shift; +++ li(Rd, upper); +++ slli(Rd, Rd, shift); +++ if (lower != 0) { +++ addi(Rd, Rd, lower); +++ } +++ } else { +++ // 32-bit integer +++ Register hi_Rd = zr; +++ if (upper != 0) { +++ lui(Rd, (int32_t)upper); +++ hi_Rd = Rd; +++ } +++ if (lower != 0 || hi_Rd == zr) { +++ addiw(Rd, hi_Rd, lower); +++ } +++ } +++} +++ +++#define INSN(NAME, REGISTER) \ +++ void MacroAssembler::NAME(const address dest, Register temp) { \ +++ assert_cond(dest != NULL); \ +++ int64_t distance = dest - pc(); \ +++ if (is_simm21(distance) && ((distance % 2) == 0)) { \ +++ Assembler::jal(REGISTER, distance); \ +++ } else { \ +++ assert(temp != noreg, "expecting a register"); \ +++ int32_t offset = 0; \ +++ movptr(temp, dest, offset); \ +++ Assembler::jalr(REGISTER, temp, offset); \ +++ } \ +++ } \ +++ +++ INSN(j, x0); +++ INSN(jal, x1); +++ +++#undef INSN +++ +++#define INSN(NAME, REGISTER) \ +++ void MacroAssembler::NAME(const Address &adr, Register temp) { \ +++ switch (adr.getMode()) { \ +++ case Address::literal: { \ +++ relocate(adr.rspec()); \ +++ NAME(adr.target(), temp); \ +++ break; \ +++ } \ +++ case Address::base_plus_offset: { \ +++ int32_t offset = ((int32_t)adr.offset() << 20) >> 20; \ +++ la(temp, Address(adr.base(), adr.offset() - offset)); \ +++ Assembler::jalr(REGISTER, temp, offset); \ +++ break; \ +++ } \ +++ default: \ +++ ShouldNotReachHere(); \ +++ } \ +++ } +++ +++ INSN(j, x0); +++ INSN(jal, x1); +++ +++#undef INSN +++ +++#define INSN(NAME) \ +++ void MacroAssembler::NAME(Register Rd, const address dest, Register temp) { \ +++ assert_cond(dest != NULL); \ +++ int64_t distance = dest - pc(); \ +++ if (is_simm21(distance) && ((distance % 2) == 0)) { \ +++ Assembler::NAME(Rd, distance); \ +++ } else { \ +++ assert_different_registers(Rd, temp); \ +++ int32_t offset = 0; \ +++ movptr(temp, dest, offset); \ +++ jalr(Rd, temp, offset); \ +++ } \ +++ } \ +++ void MacroAssembler::NAME(Register Rd, Label &L, Register temp) { \ +++ assert_different_registers(Rd, temp); \ +++ wrap_label(Rd, L, temp, &MacroAssembler::NAME); \ +++ } +++ +++ INSN(jal); +++ +++#undef INSN +++ +++#define INSN(NAME, REGISTER) \ +++ void MacroAssembler::NAME(Label &l, Register temp) { \ +++ jal(REGISTER, l, temp); \ +++ } \ +++ +++ INSN(j, x0); +++ INSN(jal, x1); +++ +++#undef INSN +++ +++void MacroAssembler::wrap_label(Register Rt, Label &L, Register tmp, load_insn_by_temp insn) { +++ if (L.is_bound()) { +++ (this->*insn)(Rt, target(L), tmp); +++ } else { +++ L.add_patch_at(code(), locator()); +++ (this->*insn)(Rt, pc(), tmp); +++ } +++} +++ +++void MacroAssembler::wrap_label(Register Rt, Label &L, jal_jalr_insn insn) { +++ if (L.is_bound()) { +++ (this->*insn)(Rt, target(L)); +++ } else { +++ L.add_patch_at(code(), locator()); +++ (this->*insn)(Rt, pc()); +++ } +++} +++ +++void MacroAssembler::wrap_label(Register r1, Register r2, Label &L, +++ compare_and_branch_insn insn, +++ compare_and_branch_label_insn neg_insn, bool is_far) { +++ if (is_far) { +++ Label done; +++ (this->*neg_insn)(r1, r2, done, /* is_far */ false); +++ j(L); +++ bind(done); +++ } else { +++ if (L.is_bound()) { +++ (this->*insn)(r1, r2, target(L)); +++ } else { +++ L.add_patch_at(code(), locator()); +++ (this->*insn)(r1, r2, pc()); +++ } +++ } +++} +++ +++#define INSN(NAME, NEG_INSN) \ +++ void MacroAssembler::NAME(Register Rs1, Register Rs2, Label &L, bool is_far) { \ 
+++ wrap_label(Rs1, Rs2, L, &MacroAssembler::NAME, &MacroAssembler::NEG_INSN, is_far); \ +++ } +++ +++ INSN(beq, bne); +++ INSN(bne, beq); +++ INSN(blt, bge); +++ INSN(bge, blt); +++ INSN(bltu, bgeu); +++ INSN(bgeu, bltu); +++ +++#undef INSN +++ +++#define INSN(NAME) \ +++ void MacroAssembler::NAME##z(Register Rs, const address dest) { \ +++ NAME(Rs, zr, dest); \ +++ } \ +++ void MacroAssembler::NAME##z(Register Rs, Label &l, bool is_far) { \ +++ NAME(Rs, zr, l, is_far); \ +++ } \ +++ +++ INSN(beq); +++ INSN(bne); +++ INSN(blt); +++ INSN(ble); +++ INSN(bge); +++ INSN(bgt); +++ +++#undef INSN +++ +++#define INSN(NAME, NEG_INSN) \ +++ void MacroAssembler::NAME(Register Rs, Register Rt, const address dest) { \ +++ NEG_INSN(Rt, Rs, dest); \ +++ } \ +++ void MacroAssembler::NAME(Register Rs, Register Rt, Label &l, bool is_far) { \ +++ NEG_INSN(Rt, Rs, l, is_far); \ +++ } +++ +++ INSN(bgt, blt); +++ INSN(ble, bge); +++ INSN(bgtu, bltu); +++ INSN(bleu, bgeu); +++ +++#undef INSN +++ +++// Float compare branch instructions +++ +++#define INSN(NAME, FLOATCMP, BRANCH) \ +++ void MacroAssembler::float_##NAME(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far, bool is_unordered) { \ +++ FLOATCMP##_s(t0, Rs1, Rs2); \ +++ BRANCH(t0, l, is_far); \ +++ } \ +++ void MacroAssembler::double_##NAME(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far, bool is_unordered) { \ +++ FLOATCMP##_d(t0, Rs1, Rs2); \ +++ BRANCH(t0, l, is_far); \ +++ } +++ +++ INSN(beq, feq, bnez); +++ INSN(bne, feq, beqz); +++ +++#undef INSN +++ +++ +++#define INSN(NAME, FLOATCMP1, FLOATCMP2) \ +++ void MacroAssembler::float_##NAME(FloatRegister Rs1, FloatRegister Rs2, Label &l, \ +++ bool is_far, bool is_unordered) { \ +++ if (is_unordered) { \ +++ /* jump if either source is NaN or condition is expected */ \ +++ FLOATCMP2##_s(t0, Rs2, Rs1); \ +++ beqz(t0, l, is_far); \ +++ } else { \ +++ /* jump if no NaN in source and condition is expected */ \ +++ FLOATCMP1##_s(t0, Rs1, Rs2); \ +++ bnez(t0, l, is_far); \ +++ } \ +++ } \ +++ void MacroAssembler::double_##NAME(FloatRegister Rs1, FloatRegister Rs2, Label &l, \ +++ bool is_far, bool is_unordered) { \ +++ if (is_unordered) { \ +++ /* jump if either source is NaN or condition is expected */ \ +++ FLOATCMP2##_d(t0, Rs2, Rs1); \ +++ beqz(t0, l, is_far); \ +++ } else { \ +++ /* jump if no NaN in source and condition is expected */ \ +++ FLOATCMP1##_d(t0, Rs1, Rs2); \ +++ bnez(t0, l, is_far); \ +++ } \ +++ } +++ +++ INSN(ble, fle, flt); +++ INSN(blt, flt, fle); +++ +++#undef INSN +++ +++#define INSN(NAME, CMP) \ +++ void MacroAssembler::float_##NAME(FloatRegister Rs1, FloatRegister Rs2, Label &l, \ +++ bool is_far, bool is_unordered) { \ +++ float_##CMP(Rs2, Rs1, l, is_far, is_unordered); \ +++ } \ +++ void MacroAssembler::double_##NAME(FloatRegister Rs1, FloatRegister Rs2, Label &l, \ +++ bool is_far, bool is_unordered) { \ +++ double_##CMP(Rs2, Rs1, l, is_far, is_unordered); \ +++ } +++ +++ INSN(bgt, blt); +++ INSN(bge, ble); +++ +++#undef INSN +++ +++ +++#define INSN(NAME, CSR) \ +++ void MacroAssembler::NAME(Register Rd) { \ +++ csrr(Rd, CSR); \ +++ } +++ +++ INSN(rdinstret, CSR_INSTRET); +++ INSN(rdcycle, CSR_CYCLE); +++ INSN(rdtime, CSR_TIME); +++ INSN(frcsr, CSR_FCSR); +++ INSN(frrm, CSR_FRM); +++ INSN(frflags, CSR_FFLAGS); +++ +++#undef INSN +++ +++void MacroAssembler::csrr(Register Rd, unsigned csr) { +++ csrrs(Rd, csr, x0); +++} +++ +++#define INSN(NAME, OPFUN) \ +++ void MacroAssembler::NAME(unsigned csr, Register Rs) { \ +++ OPFUN(x0, csr, Rs); \ +++ } +++ +++ 
INSN(csrw, csrrw); +++ INSN(csrs, csrrs); +++ INSN(csrc, csrrc); +++ +++#undef INSN +++ +++#define INSN(NAME, OPFUN) \ +++ void MacroAssembler::NAME(unsigned csr, unsigned imm) { \ +++ OPFUN(x0, csr, imm); \ +++ } +++ +++ INSN(csrwi, csrrwi); +++ INSN(csrsi, csrrsi); +++ INSN(csrci, csrrci); +++ +++#undef INSN +++ +++#define INSN(NAME, CSR) \ +++ void MacroAssembler::NAME(Register Rd, Register Rs) { \ +++ csrrw(Rd, CSR, Rs); \ +++ } +++ +++ INSN(fscsr, CSR_FCSR); +++ INSN(fsrm, CSR_FRM); +++ INSN(fsflags, CSR_FFLAGS); +++ +++#undef INSN +++ +++#define INSN(NAME) \ +++ void MacroAssembler::NAME(Register Rs) { \ +++ NAME(x0, Rs); \ +++ } +++ +++ INSN(fscsr); +++ INSN(fsrm); +++ INSN(fsflags); +++ +++#undef INSN +++ +++void MacroAssembler::fsrmi(Register Rd, unsigned imm) { +++ guarantee(imm < 5, "Rounding Mode is invalid in Rounding Mode register"); +++ csrrwi(Rd, CSR_FRM, imm); +++} +++ +++void MacroAssembler::fsflagsi(Register Rd, unsigned imm) { +++ csrrwi(Rd, CSR_FFLAGS, imm); +++} +++ +++#define INSN(NAME) \ +++ void MacroAssembler::NAME(unsigned imm) { \ +++ NAME(x0, imm); \ +++ } +++ +++ INSN(fsrmi); +++ INSN(fsflagsi); +++ +++#undef INSN +++ +++void MacroAssembler::push_reg(Register Rs) +++{ +++ addi(esp, esp, 0 - wordSize); +++ sd(Rs, Address(esp, 0)); +++} +++ +++void MacroAssembler::pop_reg(Register Rd) +++{ +++ ld(Rd, Address(esp, 0)); +++ addi(esp, esp, wordSize); +++} +++ +++int MacroAssembler::bitset_to_regs(unsigned int bitset, unsigned char* regs) { +++ int count = 0; +++ // Scan bitset to accumulate register pairs +++ for (int reg = 31; reg >= 0; reg--) { +++ if ((1U << 31) & bitset) { +++ regs[count++] = reg; +++ } +++ bitset <<= 1; +++ } +++ return count; +++} +++ +++// Push integer registers in the bitset supplied. Don't push sp. +++// Return the number of words pushed +++int MacroAssembler::push_reg(unsigned int bitset, Register stack) { +++ DEBUG_ONLY(int words_pushed = 0;) +++ unsigned char regs[32]; +++ int count = bitset_to_regs(bitset, regs); +++ // reserve one slot to align for odd count +++ int offset = is_even(count) ? 0 : wordSize; +++ +++ if (count) { +++ addi(stack, stack, -count * wordSize - offset); +++ } +++ for (int i = count - 1; i >= 0; i--) { +++ sd(as_Register(regs[i]), Address(stack, (count - 1 - i) * wordSize + offset)); +++ DEBUG_ONLY(words_pushed++;) +++ } +++ +++ assert(words_pushed == count, "oops, pushed != count"); +++ +++ return count; +++} +++ +++int MacroAssembler::pop_reg(unsigned int bitset, Register stack) { +++ DEBUG_ONLY(int words_popped = 0;) +++ unsigned char regs[32]; +++ int count = bitset_to_regs(bitset, regs); +++ // reserve one slot to align for odd count +++ int offset = is_even(count) ? 0 : wordSize; +++ +++ for (int i = count - 1; i >= 0; i--) { +++ ld(as_Register(regs[i]), Address(stack, (count - 1 - i) * wordSize + offset)); +++ DEBUG_ONLY(words_popped++;) +++ } +++ +++ if (count) { +++ addi(stack, stack, count * wordSize + offset); +++ } +++ assert(words_popped == count, "oops, popped != count"); +++ +++ return count; +++} +++ +++// Push floating-point registers in the bitset supplied. 
+++// Return the number of words pushed +++int MacroAssembler::push_fp(unsigned int bitset, Register stack) { +++ DEBUG_ONLY(int words_pushed = 0;) +++ unsigned char regs[32]; +++ int count = bitset_to_regs(bitset, regs); +++ int push_slots = count + (count & 1); +++ +++ if (count) { +++ addi(stack, stack, -push_slots * wordSize); +++ } +++ +++ for (int i = count - 1; i >= 0; i--) { +++ fsd(as_FloatRegister(regs[i]), Address(stack, (push_slots - 1 - i) * wordSize)); +++ DEBUG_ONLY(words_pushed++;) +++ } +++ +++ assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); +++ +++ return count; +++} +++ +++int MacroAssembler::pop_fp(unsigned int bitset, Register stack) { +++ DEBUG_ONLY(int words_popped = 0;) +++ unsigned char regs[32]; +++ int count = bitset_to_regs(bitset, regs); +++ int pop_slots = count + (count & 1); +++ +++ for (int i = count - 1; i >= 0; i--) { +++ fld(as_FloatRegister(regs[i]), Address(stack, (pop_slots - 1 - i) * wordSize)); +++ DEBUG_ONLY(words_popped++;) +++ } +++ +++ if (count) { +++ addi(stack, stack, pop_slots * wordSize); +++ } +++ +++ assert(words_popped == count, "oops, popped(%d) != count(%d)", words_popped, count); +++ +++ return count; +++} +++ +++#ifdef COMPILER2 +++// Push vector registers in the bitset supplied. +++// Return the number of words pushed +++int MacroAssembler::push_v(unsigned int bitset, Register stack) { +++ int vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); +++ +++ // Scan bitset to accumulate register pairs +++ unsigned char regs[32]; +++ int count = bitset_to_regs(bitset, regs); +++ +++ for (int i = 0; i < count; i++) { +++ sub(stack, stack, vector_size_in_bytes); +++ vs1r_v(as_VectorRegister(regs[i]), stack); +++ } +++ +++ return count * vector_size_in_bytes / wordSize; +++} +++ +++int MacroAssembler::pop_v(unsigned int bitset, Register stack) { +++ int vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); +++ +++ // Scan bitset to accumulate register pairs +++ unsigned char regs[32]; +++ int count = bitset_to_regs(bitset, regs); +++ +++ for (int i = count - 1; i >= 0; i--) { +++ vl1re8_v(as_VectorRegister(regs[i]), stack); +++ add(stack, stack, vector_size_in_bytes); +++ } +++ +++ return count * vector_size_in_bytes / wordSize; +++} +++#endif // COMPILER2 +++ +++void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) { +++ // Push integer registers x7, x10-x17, x28-x31. +++ push_reg(RegSet::of(x7) + RegSet::range(x10, x17) + RegSet::range(x28, x31) - exclude, sp); +++ +++ // Push float registers f0-f7, f10-f17, f28-f31. 
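+++ // That is 8 + 8 + 4 = 20 registers, hence the wordSize * 20 frame below.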
+++ addi(sp, sp, - wordSize * 20); +++ int offset = 0; +++ for (int i = 0; i < 32; i++) { +++ if (i <= f7->encoding() || i >= f28->encoding() || (i >= f10->encoding() && i <= f17->encoding())) { +++ fsd(as_FloatRegister(i), Address(sp, wordSize * (offset++))); +++ } +++ } +++} +++ +++void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) { +++ int offset = 0; +++ for (int i = 0; i < 32; i++) { +++ if (i <= f7->encoding() || i >= f28->encoding() || (i >= f10->encoding() && i <= f17->encoding())) { +++ fld(as_FloatRegister(i), Address(sp, wordSize * (offset++))); +++ } +++ } +++ addi(sp, sp, wordSize * 20); +++ +++ pop_reg(RegSet::of(x7) + RegSet::range(x10, x17) + RegSet::range(x28, x31) - exclude, sp); +++} +++ +++void MacroAssembler::push_CPU_state(bool save_vectors, int vector_size_in_bytes) { +++ // integer registers, except zr(x0) & ra(x1) & sp(x2) & gp(x3) & tp(x4) +++ push_reg(RegSet::range(x5, x31), sp); +++ +++ // float registers +++ addi(sp, sp, - 32 * wordSize); +++ for (int i = 0; i < 32; i++) { +++ fsd(as_FloatRegister(i), Address(sp, i * wordSize)); +++ } +++ +++ // vector registers +++ if (save_vectors) { +++ sub(sp, sp, vector_size_in_bytes * VectorRegisterImpl::number_of_registers); +++ vsetvli(t0, x0, Assembler::e64, Assembler::m8); +++ for (int i = 0; i < VectorRegisterImpl::number_of_registers; i += 8) { +++ add(t0, sp, vector_size_in_bytes * i); +++ vse64_v(as_VectorRegister(i), t0); +++ } +++ } +++} +++ +++void MacroAssembler::pop_CPU_state(bool restore_vectors, int vector_size_in_bytes) { +++ // vector registers +++ if (restore_vectors) { +++ vsetvli(t0, x0, Assembler::e64, Assembler::m8); +++ for (int i = 0; i < VectorRegisterImpl::number_of_registers; i += 8) { +++ vle64_v(as_VectorRegister(i), sp); +++ add(sp, sp, vector_size_in_bytes * 8); +++ } +++ } +++ +++ // float registers +++ for (int i = 0; i < 32; i++) { +++ fld(as_FloatRegister(i), Address(sp, i * wordSize)); +++ } +++ addi(sp, sp, 32 * wordSize); +++ +++ // integer registers, except zr(x0) & ra(x1) & sp(x2) & gp(x3) & tp(x4) +++ pop_reg(RegSet::range(x5, x31), sp); +++} +++ +++static int patch_offset_in_jal(address branch, int64_t offset) { +++ assert(Assembler::is_simm21(offset) && ((offset % 2) == 0), +++ "offset is too large to be patched in one jal instruction!\n"); +++ Assembler::patch(branch, 31, 31, (offset >> 20) & 0x1); // offset[20] ==> branch[31] +++ Assembler::patch(branch, 30, 21, (offset >> 1) & 0x3ff); // offset[10:1] ==> branch[30:21] +++ Assembler::patch(branch, 20, 20, (offset >> 11) & 0x1); // offset[11] ==> branch[20] +++ Assembler::patch(branch, 19, 12, (offset >> 12) & 0xff); // offset[19:12] ==> branch[19:12] +++ return NativeInstruction::instruction_size; // only one instruction +++} +++ +++static int patch_offset_in_conditional_branch(address branch, int64_t offset) { +++ assert(Assembler::is_simm13(offset) && ((offset % 2) == 0), +++ "offset is too large to be patched in one beq/bge/bgeu/blt/bltu/bne instruction!\n"); +++ Assembler::patch(branch, 31, 31, (offset >> 12) & 0x1); // offset[12] ==> branch[31] +++ Assembler::patch(branch, 30, 25, (offset >> 5) & 0x3f); // offset[10:5] ==> branch[30:25] +++ Assembler::patch(branch, 7, 7, (offset >> 11) & 0x1); // offset[11] ==> branch[7] +++ Assembler::patch(branch, 11, 8, (offset >> 1) & 0xf); // offset[4:1] ==> branch[11:8] +++ return NativeInstruction::instruction_size; // only one instruction +++} +++ +++static int patch_offset_in_pc_relative(address branch, int64_t offset) { +++ const int 
PC_RELATIVE_INSTRUCTION_NUM = 2; // auipc, addi/jalr/load +++ Assembler::patch(branch, 31, 12, ((offset + 0x800) >> 12) & 0xfffff); // Auipc. offset[31:12] ==> branch[31:12] +++ Assembler::patch(branch + 4, 31, 20, offset & 0xfff); // Addi/Jalr/Load. offset[11:0] ==> branch[31:20] +++ return PC_RELATIVE_INSTRUCTION_NUM * NativeInstruction::instruction_size; +++} +++ +++static int patch_addr_in_movptr(address branch, address target) { +++ const int MOVPTR_INSTRUCTIONS_NUM = 6; // lui + addi + slli + addi + slli + addi/jalr/load +++ int32_t lower = ((intptr_t)target << 35) >> 35; +++ int64_t upper = ((intptr_t)target - lower) >> 29; +++ Assembler::patch(branch + 0, 31, 12, upper & 0xfffff); // Lui. target[48:29] + target[28] ==> branch[31:12] +++ Assembler::patch(branch + 4, 31, 20, (lower >> 17) & 0xfff); // Addi. target[28:17] ==> branch[31:20] +++ Assembler::patch(branch + 12, 31, 20, (lower >> 6) & 0x7ff); // Addi. target[16: 6] ==> branch[31:20] +++ Assembler::patch(branch + 20, 31, 20, lower & 0x3f); // Addi/Jalr/Load. target[ 5: 0] ==> branch[31:20] +++ return MOVPTR_INSTRUCTIONS_NUM * NativeInstruction::instruction_size; +++} +++ +++static int patch_imm_in_li64(address branch, address target) { +++ const int LI64_INSTRUCTIONS_NUM = 8; // lui + addi + slli + addi + slli + addi + slli + addi +++ int64_t lower = (intptr_t)target & 0xffffffff; +++ lower = lower - ((lower << 44) >> 44); +++ int64_t tmp_imm = ((uint64_t)((intptr_t)target & 0xffffffff00000000)) + (uint64_t)lower; +++ int32_t upper = (tmp_imm - (int32_t)lower) >> 32; +++ int64_t tmp_upper = upper, tmp_lower = upper; +++ tmp_lower = (tmp_lower << 52) >> 52; +++ tmp_upper -= tmp_lower; +++ tmp_upper >>= 12; +++ // Load upper 32 bits. Upper = target[63:32], but if target[31] = 1 or (target[31:20] == 0x7ff && target[19] == 1), +++ // upper = target[63:32] + 1. +++ Assembler::patch(branch + 0, 31, 12, tmp_upper & 0xfffff); // Lui. +++ Assembler::patch(branch + 4, 31, 20, tmp_lower & 0xfff); // Addi. +++ // Load the rest 32 bits. +++ Assembler::patch(branch + 12, 31, 20, ((int32_t)lower >> 20) & 0xfff); // Addi. +++ Assembler::patch(branch + 20, 31, 20, (((intptr_t)target << 44) >> 52) & 0xfff); // Addi. +++ Assembler::patch(branch + 28, 31, 20, (intptr_t)target & 0xff); // Addi. +++ return LI64_INSTRUCTIONS_NUM * NativeInstruction::instruction_size; +++} +++ +++static int patch_imm_in_li32(address branch, int32_t target) { +++ const int LI32_INSTRUCTIONS_NUM = 2; // lui + addiw +++ int64_t upper = (intptr_t)target; +++ int32_t lower = (((int32_t)target) << 20) >> 20; +++ upper -= lower; +++ upper = (int32_t)upper; +++ Assembler::patch(branch + 0, 31, 12, (upper >> 12) & 0xfffff); // Lui. +++ Assembler::patch(branch + 4, 31, 20, lower & 0xfff); // Addiw. 
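+++ // Worked example (illustrative values): for target == 0x12345fff the low
+++ // 12 bits sign-extend to lower == -1, so upper becomes 0x12346000; the
+++ // patched pair lui(0x12346) ; addiw(-1) then reconstructs
+++ // 0x12346000 - 1 == 0x12345fff.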
+++ return LI32_INSTRUCTIONS_NUM * NativeInstruction::instruction_size; +++} +++ +++static long get_offset_of_jal(address insn_addr) { +++ assert_cond(insn_addr != NULL); +++ long offset = 0; +++ unsigned insn = *(unsigned*)insn_addr; +++ long val = (long)Assembler::sextract(insn, 31, 12); +++ offset |= ((val >> 19) & 0x1) << 20; +++ offset |= (val & 0xff) << 12; +++ offset |= ((val >> 8) & 0x1) << 11; +++ offset |= ((val >> 9) & 0x3ff) << 1; +++ offset = (offset << 43) >> 43; +++ return offset; +++} +++ +++static long get_offset_of_conditional_branch(address insn_addr) { +++ long offset = 0; +++ assert_cond(insn_addr != NULL); +++ unsigned insn = *(unsigned*)insn_addr; +++ offset = (long)Assembler::sextract(insn, 31, 31); +++ offset = (offset << 12) | (((long)(Assembler::sextract(insn, 7, 7) & 0x1)) << 11); +++ offset = offset | (((long)(Assembler::sextract(insn, 30, 25) & 0x3f)) << 5); +++ offset = offset | (((long)(Assembler::sextract(insn, 11, 8) & 0xf)) << 1); +++ offset = (offset << 41) >> 41; +++ return offset; +++} +++ +++static long get_offset_of_pc_relative(address insn_addr) { +++ long offset = 0; +++ assert_cond(insn_addr != NULL); +++ offset = ((long)(Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12))) << 12; // Auipc. +++ offset += ((long)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)); // Addi/Jalr/Load. +++ offset = (offset << 32) >> 32; +++ return offset; +++} +++ +++static address get_target_of_movptr(address insn_addr) { +++ assert_cond(insn_addr != NULL); +++ intptr_t target_address = (((int64_t)Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12)) & 0xfffff) << 29; // Lui. +++ target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)) << 17; // Addi. +++ target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[3], 31, 20)) << 6; // Addi. +++ target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[5], 31, 20)); // Addi/Jalr/Load. +++ return (address) target_address; +++} +++ +++static address get_target_of_li64(address insn_addr) { +++ assert_cond(insn_addr != NULL); +++ intptr_t target_address = (((int64_t)Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12)) & 0xfffff) << 44; // Lui. +++ target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)) << 32; // Addi. +++ target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[3], 31, 20)) << 20; // Addi. +++ target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[5], 31, 20)) << 8; // Addi. +++ target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[7], 31, 20)); // Addi. +++ return (address)target_address; +++} +++ +++static address get_target_of_li32(address insn_addr) { +++ assert_cond(insn_addr != NULL); +++ intptr_t target_address = (((int64_t)Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12)) & 0xfffff) << 12; // Lui. +++ target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)); // Addiw. +++ return (address)target_address; +++} +++ +++// Patch any kind of instruction; there may be several instructions. +++// Return the total length (in bytes) of the instructions. 
+++int MacroAssembler::pd_patch_instruction_size(address branch, address target) { +++ assert_cond(branch != NULL); +++ int64_t offset = target - branch; +++ if (NativeInstruction::is_jal_at(branch)) { // jal +++ return patch_offset_in_jal(branch, offset); +++ } else if (NativeInstruction::is_branch_at(branch)) { // beq/bge/bgeu/blt/bltu/bne +++ return patch_offset_in_conditional_branch(branch, offset); +++ } else if (NativeInstruction::is_pc_relative_at(branch)) { // auipc, addi/jalr/load +++ return patch_offset_in_pc_relative(branch, offset); +++ } else if (NativeInstruction::is_movptr_at(branch)) { // movptr +++ return patch_addr_in_movptr(branch, target); +++ } else if (NativeInstruction::is_li64_at(branch)) { // li64 +++ return patch_imm_in_li64(branch, target); +++ } else if (NativeInstruction::is_li32_at(branch)) { // li32 +++ int64_t imm = (intptr_t)target; +++ return patch_imm_in_li32(branch, (int32_t)imm); +++ } else { +++#ifdef ASSERT +++ tty->print_cr("pd_patch_instruction_size: instruction 0x%x at " INTPTR_FORMAT " could not be patched!\n", +++ *(unsigned*)branch, p2i(branch)); +++ Disassembler::decode(branch - 16, branch + 16); +++#endif +++ ShouldNotReachHere(); +++ return -1; +++ } +++} +++ +++address MacroAssembler::target_addr_for_insn(address insn_addr) { +++ long offset = 0; +++ assert_cond(insn_addr != NULL); +++ if (NativeInstruction::is_jal_at(insn_addr)) { // jal +++ offset = get_offset_of_jal(insn_addr); +++ } else if (NativeInstruction::is_branch_at(insn_addr)) { // beq/bge/bgeu/blt/bltu/bne +++ offset = get_offset_of_conditional_branch(insn_addr); +++ } else if (NativeInstruction::is_pc_relative_at(insn_addr)) { // auipc, addi/jalr/load +++ offset = get_offset_of_pc_relative(insn_addr); +++ } else if (NativeInstruction::is_movptr_at(insn_addr)) { // movptr +++ return get_target_of_movptr(insn_addr); +++ } else if (NativeInstruction::is_li64_at(insn_addr)) { // li64 +++ return get_target_of_li64(insn_addr); +++ } else if (NativeInstruction::is_li32_at(insn_addr)) { // li32 +++ return get_target_of_li32(insn_addr); +++ } else { +++ ShouldNotReachHere(); +++ } +++ return address(((uintptr_t)insn_addr + offset)); +++} +++ +++int MacroAssembler::patch_oop(address insn_addr, address o) { +++ // OOPs are either narrow (32 bits) or wide (48 bits). We encode +++ // narrow OOPs by setting the upper 16 bits in the first +++ // instruction. 
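+++ // Concretely, a narrow oop is materialized by the two-instruction li32
+++ // pattern (lui + addiw) and a wide one by the six-instruction movptr
+++ // pattern; the two branches below dispatch on which pattern is found
+++ // at insn_addr.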
+++ if (NativeInstruction::is_li32_at(insn_addr)) { +++ // Move narrow OOP +++ uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o)); +++ return patch_imm_in_li32(insn_addr, (int32_t)n); +++ } else if (NativeInstruction::is_movptr_at(insn_addr)) { +++ // Move wide OOP +++ return patch_addr_in_movptr(insn_addr, o); +++ } +++ ShouldNotReachHere(); +++ return -1; +++} +++ +++void MacroAssembler::reinit_heapbase() { +++ if (UseCompressedOops) { +++ if (Universe::is_fully_initialized()) { +++ mv(xheapbase, CompressedOops::ptrs_base()); +++ } else { +++ ExternalAddress target((address)CompressedOops::ptrs_base_addr()); +++ relocate(target.rspec(), [&] { +++ int32_t offset; +++ la_patchable(xheapbase, target, offset); +++ ld(xheapbase, Address(xheapbase, offset)); +++ }); +++ } +++ } +++} +++ +++void MacroAssembler::movptr(Register Rd, address addr, int32_t &offset) { +++ int64_t imm64 = (int64_t)addr; +++#ifndef PRODUCT +++ { +++ char buffer[64]; +++ snprintf(buffer, sizeof(buffer), "0x%" PRIx64, imm64); +++ block_comment(buffer); +++ } +++#endif +++ assert((uintptr_t)imm64 < (1ull << 48), "48-bit overflow in address constant"); +++ // Load upper 31 bits +++ int64_t imm = imm64 >> 17; +++ int64_t upper = imm, lower = imm; +++ lower = (lower << 52) >> 52; +++ upper -= lower; +++ upper = (int32_t)upper; +++ lui(Rd, upper); +++ addi(Rd, Rd, lower); +++ +++ // Load the rest 17 bits. +++ slli(Rd, Rd, 11); +++ addi(Rd, Rd, (imm64 >> 6) & 0x7ff); +++ slli(Rd, Rd, 6); +++ +++ // This offset will be used by following jalr/ld. +++ offset = imm64 & 0x3f; +++} +++ +++void MacroAssembler::add(Register Rd, Register Rn, int64_t increment, Register temp) { +++ if (is_simm12(increment)) { +++ addi(Rd, Rn, increment); +++ } else { +++ assert_different_registers(Rn, temp); +++ li(temp, increment); +++ add(Rd, Rn, temp); +++ } +++} +++ +++void MacroAssembler::addw(Register Rd, Register Rn, int32_t increment, Register temp) { +++ if (is_simm12(increment)) { +++ addiw(Rd, Rn, increment); +++ } else { +++ assert_different_registers(Rn, temp); +++ li(temp, increment); +++ addw(Rd, Rn, temp); +++ } +++} +++ +++void MacroAssembler::sub(Register Rd, Register Rn, int64_t decrement, Register temp) { +++ if (is_simm12(-decrement)) { +++ addi(Rd, Rn, -decrement); +++ } else { +++ assert_different_registers(Rn, temp); +++ li(temp, decrement); +++ sub(Rd, Rn, temp); +++ } +++} +++ +++void MacroAssembler::subw(Register Rd, Register Rn, int32_t decrement, Register temp) { +++ if (is_simm12(-decrement)) { +++ addiw(Rd, Rn, -decrement); +++ } else { +++ assert_different_registers(Rn, temp); +++ li(temp, decrement); +++ subw(Rd, Rn, temp); +++ } +++} +++ +++void MacroAssembler::andrw(Register Rd, Register Rs1, Register Rs2) { +++ andr(Rd, Rs1, Rs2); +++ sign_extend(Rd, Rd, 32); +++} +++ +++void MacroAssembler::orrw(Register Rd, Register Rs1, Register Rs2) { +++ orr(Rd, Rs1, Rs2); +++ sign_extend(Rd, Rd, 32); +++} +++ +++void MacroAssembler::xorrw(Register Rd, Register Rs1, Register Rs2) { +++ xorr(Rd, Rs1, Rs2); +++ sign_extend(Rd, Rd, 32); +++} +++ +++// Note: load_unsigned_short used to be called load_unsigned_word. 
+++int MacroAssembler::load_unsigned_short(Register dst, Address src) { +++ int off = offset(); +++ lhu(dst, src); +++ return off; +++} +++ +++int MacroAssembler::load_unsigned_byte(Register dst, Address src) { +++ int off = offset(); +++ lbu(dst, src); +++ return off; +++} +++ +++int MacroAssembler::load_signed_short(Register dst, Address src) { +++ int off = offset(); +++ lh(dst, src); +++ return off; +++} +++ +++int MacroAssembler::load_signed_byte(Register dst, Address src) { +++ int off = offset(); +++ lb(dst, src); +++ return off; +++} +++ +++void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { +++ switch (size_in_bytes) { +++ case 8: ld(dst, src); break; +++ case 4: is_signed ? lw(dst, src) : lwu(dst, src); break; +++ case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; +++ case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; +++ default: ShouldNotReachHere(); +++ } +++} +++ +++void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { +++ switch (size_in_bytes) { +++ case 8: sd(src, dst); break; +++ case 4: sw(src, dst); break; +++ case 2: sh(src, dst); break; +++ case 1: sb(src, dst); break; +++ default: ShouldNotReachHere(); +++ } +++} +++ +++// reverse bytes in halfword in lower 16 bits and sign-extend +++// Rd[15:0] = Rs[7:0] Rs[15:8] (sign-extend to 64 bits) +++void MacroAssembler::revb_h_h(Register Rd, Register Rs, Register tmp) { +++ if (UseZbb) { +++ rev8(Rd, Rs); +++ srai(Rd, Rd, 48); +++ return; +++ } +++ assert_different_registers(Rs, tmp); +++ assert_different_registers(Rd, tmp); +++ srli(tmp, Rs, 8); +++ andi(tmp, tmp, 0xFF); +++ slli(Rd, Rs, 56); +++ srai(Rd, Rd, 48); // sign-extend +++ orr(Rd, Rd, tmp); +++} +++ +++// reverse bytes in lower word and sign-extend +++// Rd[31:0] = Rs[7:0] Rs[15:8] Rs[23:16] Rs[31:24] (sign-extend to 64 bits) +++void MacroAssembler::revb_w_w(Register Rd, Register Rs, Register tmp1, Register tmp2) { +++ if (UseZbb) { +++ rev8(Rd, Rs); +++ srai(Rd, Rd, 32); +++ return; +++ } +++ assert_different_registers(Rs, tmp1, tmp2); +++ assert_different_registers(Rd, tmp1, tmp2); +++ revb_h_w_u(Rd, Rs, tmp1, tmp2); +++ slli(tmp2, Rd, 48); +++ srai(tmp2, tmp2, 32); // sign-extend +++ srli(Rd, Rd, 16); +++ orr(Rd, Rd, tmp2); +++} +++ +++// reverse bytes in halfword in lower 16 bits and zero-extend +++// Rd[15:0] = Rs[7:0] Rs[15:8] (zero-extend to 64 bits) +++void MacroAssembler::revb_h_h_u(Register Rd, Register Rs, Register tmp) { +++ if (UseZbb) { +++ rev8(Rd, Rs); +++ srli(Rd, Rd, 48); +++ return; +++ } +++ assert_different_registers(Rs, tmp); +++ assert_different_registers(Rd, tmp); +++ srli(tmp, Rs, 8); +++ andi(tmp, tmp, 0xFF); +++ andi(Rd, Rs, 0xFF); +++ slli(Rd, Rd, 8); +++ orr(Rd, Rd, tmp); +++} +++ +++// reverse bytes in halfwords in lower 32 bits and zero-extend +++// Rd[31:0] = Rs[23:16] Rs[31:24] Rs[7:0] Rs[15:8] (zero-extend to 64 bits) +++void MacroAssembler::revb_h_w_u(Register Rd, Register Rs, Register tmp1, Register tmp2) { +++ if (UseZbb) { +++ rev8(Rd, Rs); +++ rori(Rd, Rd, 32); +++ roriw(Rd, Rd, 16); +++ zero_extend(Rd, Rd, 32); +++ return; +++ } +++ assert_different_registers(Rs, tmp1, tmp2); +++ assert_different_registers(Rd, tmp1, tmp2); +++ srli(tmp2, Rs, 16); +++ revb_h_h_u(tmp2, tmp2, tmp1); +++ revb_h_h_u(Rd, Rs, tmp1); +++ slli(tmp2, tmp2, 16); +++ orr(Rd, Rd, tmp2); +++} +++ +++// This method is only used for revb_h +++// Rd = Rs[47:0] 
Rs[55:48] Rs[63:56] +++void MacroAssembler::revb_h_helper(Register Rd, Register Rs, Register tmp1, Register tmp2) { +++ assert_different_registers(Rs, tmp1, tmp2); +++ assert_different_registers(Rd, tmp1); +++ srli(tmp1, Rs, 48); +++ andi(tmp2, tmp1, 0xFF); +++ slli(tmp2, tmp2, 8); +++ srli(tmp1, tmp1, 8); +++ orr(tmp1, tmp1, tmp2); +++ slli(Rd, Rs, 16); +++ orr(Rd, Rd, tmp1); +++} +++ +++// reverse bytes in each halfword +++// Rd[63:0] = Rs[55:48] Rs[63:56] Rs[39:32] Rs[47:40] Rs[23:16] Rs[31:24] Rs[7:0] Rs[15:8] +++void MacroAssembler::revb_h(Register Rd, Register Rs, Register tmp1, Register tmp2) { +++ if (UseZbb) { +++ assert_different_registers(Rs, tmp1); +++ assert_different_registers(Rd, tmp1); +++ rev8(Rd, Rs); +++ zero_extend(tmp1, Rd, 32); +++ roriw(tmp1, tmp1, 16); +++ slli(tmp1, tmp1, 32); +++ srli(Rd, Rd, 32); +++ roriw(Rd, Rd, 16); +++ zero_extend(Rd, Rd, 32); +++ orr(Rd, Rd, tmp1); +++ return; +++ } +++ assert_different_registers(Rs, tmp1, tmp2); +++ assert_different_registers(Rd, tmp1, tmp2); +++ revb_h_helper(Rd, Rs, tmp1, tmp2); +++ for (int i = 0; i < 3; ++i) { +++ revb_h_helper(Rd, Rd, tmp1, tmp2); +++ } +++} +++ +++// reverse bytes in each word +++// Rd[63:0] = Rs[39:32] Rs[47:40] Rs[55:48] Rs[63:56] Rs[7:0] Rs[15:8] Rs[23:16] Rs[31:24] +++void MacroAssembler::revb_w(Register Rd, Register Rs, Register tmp1, Register tmp2) { +++ if (UseZbb) { +++ rev8(Rd, Rs); +++ rori(Rd, Rd, 32); +++ return; +++ } +++ assert_different_registers(Rs, tmp1, tmp2); +++ assert_different_registers(Rd, tmp1, tmp2); +++ revb(Rd, Rs, tmp1, tmp2); +++ ror_imm(Rd, Rd, 32); +++} +++ +++// reverse bytes in doubleword +++// Rd[63:0] = Rs[7:0] Rs[15:8] Rs[23:16] Rs[31:24] Rs[39:32] Rs[47,40] Rs[55,48] Rs[63:56] +++void MacroAssembler::revb(Register Rd, Register Rs, Register tmp1, Register tmp2) { +++ if (UseZbb) { +++ rev8(Rd, Rs); +++ return; +++ } +++ assert_different_registers(Rs, tmp1, tmp2); +++ assert_different_registers(Rd, tmp1, tmp2); +++ andi(tmp1, Rs, 0xFF); +++ slli(tmp1, tmp1, 8); +++ for (int step = 8; step < 56; step += 8) { +++ srli(tmp2, Rs, step); +++ andi(tmp2, tmp2, 0xFF); +++ orr(tmp1, tmp1, tmp2); +++ slli(tmp1, tmp1, 8); +++ } +++ srli(Rd, Rs, 56); +++ andi(Rd, Rd, 0xFF); +++ orr(Rd, tmp1, Rd); +++} +++ +++// rotate right with shift bits +++void MacroAssembler::ror_imm(Register dst, Register src, uint32_t shift, Register tmp) +++{ +++ if (UseZbb) { +++ rori(dst, src, shift); +++ return; +++ } +++ +++ assert_different_registers(dst, tmp); +++ assert_different_registers(src, tmp); +++ assert(shift < 64, "shift amount must be < 64"); +++ slli(tmp, src, 64 - shift); +++ srli(dst, src, shift); +++ orr(dst, dst, tmp); +++} +++ +++void MacroAssembler::andi(Register Rd, Register Rn, int64_t imm, Register tmp) { +++ if (is_simm12(imm)) { +++ and_imm12(Rd, Rn, imm); +++ } else { +++ assert_different_registers(Rn, tmp); +++ mv(tmp, imm); +++ andr(Rd, Rn, tmp); +++ } +++} +++ +++void MacroAssembler::orptr(Address adr, RegisterOrConstant src, Register tmp1, Register tmp2) { +++ ld(tmp1, adr); +++ if (src.is_register()) { +++ orr(tmp1, tmp1, src.as_register()); +++ } else { +++ if (is_simm12(src.as_constant())) { +++ ori(tmp1, tmp1, src.as_constant()); +++ } else { +++ assert_different_registers(tmp1, tmp2); +++ mv(tmp2, src.as_constant()); +++ orr(tmp1, tmp1, tmp2); +++ } +++ } +++ sd(tmp1, adr); +++} +++ +++void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp1, Register tmp2, Label &L) { +++ assert_different_registers(oop, trial_klass, tmp1, tmp2); +++ if 
(UseCompressedClassPointers) {
+++ lwu(tmp1, Address(oop, oopDesc::klass_offset_in_bytes()));
+++ if (CompressedKlassPointers::base() == NULL) {
+++ slli(tmp1, tmp1, CompressedKlassPointers::shift());
+++ beq(trial_klass, tmp1, L);
+++ return;
+++ }
+++ decode_klass_not_null(tmp1, tmp2);
+++ } else {
+++ ld(tmp1, Address(oop, oopDesc::klass_offset_in_bytes()));
+++ }
+++ beq(trial_klass, tmp1, L);
+++}
+++
+++// Move an oop into a register. immediate is true if we want
+++// immediate instructions and nmethod entry barriers are not enabled.
+++// i.e. we are not going to patch this instruction while the code is being
+++// executed by another thread.
+++void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
+++ int oop_index;
+++ if (obj == NULL) {
+++ oop_index = oop_recorder()->allocate_oop_index(obj);
+++ } else {
+++#ifdef ASSERT
+++ {
+++ ThreadInVMfromUnknown tiv;
+++ assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
+++ }
+++#endif
+++ oop_index = oop_recorder()->find_index(obj);
+++ }
+++ RelocationHolder rspec = oop_Relocation::spec(oop_index);
+++
+++ // nmethod entry barriers necessitate using the constant pool. They have to be
+++ // ordered with respect to oop accesses.
+++ if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL || !immediate) {
+++ address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
+++ ld_constant(dst, Address(dummy, rspec));
+++ } else {
+++ mv(dst, Address((address)obj, rspec));
+++ }
+++}
+++
+++// Move a metadata address into a register.
+++void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
+++ int oop_index;
+++ if (obj == NULL) {
+++ oop_index = oop_recorder()->allocate_metadata_index(obj);
+++ } else {
+++ oop_index = oop_recorder()->find_index(obj);
+++ }
+++ RelocationHolder rspec = metadata_Relocation::spec(oop_index);
+++ mv(dst, Address((address)obj, rspec));
+++}
+++
+++// Writes to stack successive pages until offset reached to check for
+++// stack overflow + shadow pages. This clobbers tmp.
+++void MacroAssembler::bang_stack_size(Register size, Register tmp) {
+++ assert_different_registers(tmp, size, t0);
+++ // Bang stack for total size given plus shadow page size.
+++ // Bang one page at a time because large size can bang beyond yellow and
+++ // red zones.
+++ mv(t0, os::vm_page_size());
+++ Label loop;
+++ bind(loop);
+++ sub(tmp, sp, t0);
+++ subw(size, size, t0);
+++ sd(size, Address(tmp));
+++ bgtz(size, loop);
+++
+++ // Bang down shadow pages too.
+++ // At this point, (tmp-0) is the last address touched, so don't
+++ // touch it again. (It was touched as (tmp-pagesize) but then tmp
+++ // was post-decremented.) Skip this address by starting at i=1, and
+++ // touch a few more pages below. N.B. It is important to touch all
+++ // the way down to and including i=StackShadowPages.
+++ for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / os::vm_page_size()) - 1; i++) {
+++ // this could be any sized move but this can be a debugging crumb,
+++ // so the bigger the better.
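+++ // (For scale: with 4 KiB pages and an assumed 32 KiB shadow zone, this
+++ // loop touches 32768 / 4096 - 1 = 7 further pages below the last page
+++ // banged above.)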
+++ sub(tmp, tmp, os::vm_page_size()); +++ sd(size, Address(tmp, 0)); +++ } +++} +++ +++SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value) { +++ int32_t offset = 0; +++ _masm = masm; +++ ExternalAddress target((address)flag_addr); +++ _masm->relocate(target.rspec(), [&] { +++ int32_t offset; +++ _masm->la_patchable(t0, target, offset); +++ _masm->lbu(t0, Address(t0, offset)); +++ }); +++ _masm->beqz(t0, _label); +++} +++ +++SkipIfEqual::~SkipIfEqual() { +++ _masm->bind(_label); +++ _masm = NULL; +++} +++ +++void MacroAssembler::load_mirror(Register dst, Register method, Register tmp) { +++ const int mirror_offset = in_bytes(Klass::java_mirror_offset()); +++ ld(dst, Address(xmethod, Method::const_offset())); +++ ld(dst, Address(dst, ConstMethod::constants_offset())); +++ ld(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes())); +++ ld(dst, Address(dst, mirror_offset)); +++ resolve_oop_handle(dst, tmp); +++} +++ +++void MacroAssembler::resolve_oop_handle(Register result, Register tmp) { +++ // OopHandle::resolve is an indirection. +++ assert_different_registers(result, tmp); +++ access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp, noreg); +++} +++ +++// ((WeakHandle)result).resolve() +++void MacroAssembler::resolve_weak_handle(Register result, Register tmp) { +++ assert_different_registers(result, tmp); +++ Label resolved; +++ +++ // A null weak handle resolves to null. +++ beqz(result, resolved); +++ +++ // Only 64 bit platforms support GCs that require a tmp register +++ // Only IN_HEAP loads require a thread_tmp register +++ // WeakHandle::resolve is an indirection like jweak. +++ access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, +++ result, Address(result), tmp, noreg /* tmp_thread */); +++ bind(resolved); +++} +++ +++void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, +++ Register dst, Address src, +++ Register tmp1, Register thread_tmp) { +++ BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); +++ decorators = AccessInternal::decorator_fixup(decorators); +++ bool as_raw = (decorators & AS_RAW) != 0; +++ if (as_raw) { +++ bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp); +++ } else { +++ bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp); +++ } +++} +++ +++void MacroAssembler::null_check(Register reg, int offset) { +++ if (needs_explicit_null_check(offset)) { +++ // provoke OS NULL exception if reg = NULL by +++ // accessing M[reg] w/o changing any registers +++ // NOTE: this is plenty to provoke a segv +++ ld(zr, Address(reg, 0)); +++ } else { +++ // nothing to do, (later) access of M[reg + offset] +++ // will provoke OS NULL exception if reg = NULL +++ } +++} +++ +++void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, +++ Address dst, Register src, +++ Register tmp1, Register thread_tmp) { +++ BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); +++ decorators = AccessInternal::decorator_fixup(decorators); +++ bool as_raw = (decorators & AS_RAW) != 0; +++ if (as_raw) { +++ bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, thread_tmp); +++ } else { +++ bs->store_at(this, decorators, type, dst, src, tmp1, thread_tmp); +++ } +++} +++ +++// Algorithm must match CompressedOops::encode. 
+++void MacroAssembler::encode_heap_oop(Register d, Register s) { +++ verify_oop(s, "broken oop in encode_heap_oop"); +++ if (CompressedOops::base() == NULL) { +++ if (CompressedOops::shift() != 0) { +++ assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); +++ srli(d, s, LogMinObjAlignmentInBytes); +++ } else { +++ mv(d, s); +++ } +++ } else { +++ Label notNull; +++ sub(d, s, xheapbase); +++ bgez(d, notNull); +++ mv(d, zr); +++ bind(notNull); +++ if (CompressedOops::shift() != 0) { +++ assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); +++ srli(d, d, CompressedOops::shift()); +++ } +++ } +++} +++ +++void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { +++ assert_different_registers(dst, tmp); +++ assert_different_registers(src, tmp); +++ if (UseCompressedClassPointers) { +++ lwu(dst, Address(src, oopDesc::klass_offset_in_bytes())); +++ decode_klass_not_null(dst, tmp); +++ } else { +++ ld(dst, Address(src, oopDesc::klass_offset_in_bytes())); +++ } +++} +++ +++void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { +++ // FIXME: Should this be a store release? concurrent gcs assumes +++ // klass length is valid if klass field is not null. +++ if (UseCompressedClassPointers) { +++ encode_klass_not_null(src, tmp); +++ sw(src, Address(dst, oopDesc::klass_offset_in_bytes())); +++ } else { +++ sd(src, Address(dst, oopDesc::klass_offset_in_bytes())); +++ } +++} +++ +++void MacroAssembler::store_klass_gap(Register dst, Register src) { +++ if (UseCompressedClassPointers) { +++ // Store to klass gap in destination +++ sw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); +++ } +++} +++ +++void MacroAssembler::decode_klass_not_null(Register r, Register tmp) { +++ assert_different_registers(r, tmp); +++ decode_klass_not_null(r, r, tmp); +++} +++ +++void MacroAssembler::decode_klass_not_null(Register dst, Register src, Register tmp) { +++ assert(UseCompressedClassPointers, "should only be used for compressed headers"); +++ +++ if (CompressedKlassPointers::base() == NULL) { +++ if (CompressedKlassPointers::shift() != 0) { +++ assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); +++ slli(dst, src, LogKlassAlignmentInBytes); +++ } else { +++ mv(dst, src); +++ } +++ return; +++ } +++ +++ Register xbase = dst; +++ if (dst == src) { +++ xbase = tmp; +++ } +++ +++ assert_different_registers(src, xbase); +++ mv(xbase, (uintptr_t)CompressedKlassPointers::base()); +++ +++ if (CompressedKlassPointers::shift() != 0) { +++ assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); +++ assert_different_registers(t0, xbase); +++ shadd(dst, src, xbase, t0, LogKlassAlignmentInBytes); +++ } else { +++ add(dst, xbase, src); +++ } +++} +++ +++void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { +++ assert_different_registers(r, tmp); +++ encode_klass_not_null(r, r, tmp); +++} +++ +++void MacroAssembler::encode_klass_not_null(Register dst, Register src, Register tmp) { +++ assert(UseCompressedClassPointers, "should only be used for compressed headers"); +++ +++ if (CompressedKlassPointers::base() == NULL) { +++ if (CompressedKlassPointers::shift() != 0) { +++ assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); +++ srli(dst, src, LogKlassAlignmentInBytes); +++ } else { +++ mv(dst, src); +++ } +++ return; +++ } +++ +++ if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0 && +++ 
CompressedKlassPointers::shift() == 0) { +++ zero_extend(dst, src, 32); +++ return; +++ } +++ +++ Register xbase = dst; +++ if (dst == src) { +++ xbase = tmp; +++ } +++ +++ assert_different_registers(src, xbase); +++ mv(xbase, (uintptr_t)CompressedKlassPointers::base()); +++ sub(dst, src, xbase); +++ if (CompressedKlassPointers::shift() != 0) { +++ assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); +++ srli(dst, dst, LogKlassAlignmentInBytes); +++ } +++} +++ +++void MacroAssembler::decode_heap_oop_not_null(Register r) { +++ decode_heap_oop_not_null(r, r); +++} +++ +++void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { +++ assert(UseCompressedOops, "should only be used for compressed headers"); +++ assert(Universe::heap() != NULL, "java heap should be initialized"); +++ // Cannot assert, unverified entry point counts instructions (see .ad file) +++ // vtableStubs also counts instructions in pd_code_size_limit. +++ // Also do not verify_oop as this is called by verify_oop. +++ if (CompressedOops::shift() != 0) { +++ assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); +++ slli(dst, src, LogMinObjAlignmentInBytes); +++ if (CompressedOops::base() != NULL) { +++ add(dst, xheapbase, dst); +++ } +++ } else { +++ assert(CompressedOops::base() == NULL, "sanity"); +++ mv(dst, src); +++ } +++} +++ +++void MacroAssembler::decode_heap_oop(Register d, Register s) { +++ if (CompressedOops::base() == NULL) { +++ if (CompressedOops::shift() != 0 || d != s) { +++ slli(d, s, CompressedOops::shift()); +++ } +++ } else { +++ Label done; +++ mv(d, s); +++ beqz(s, done); +++ shadd(d, s, xheapbase, d, LogMinObjAlignmentInBytes); +++ bind(done); +++ } +++ verify_oop(d, "broken oop in decode_heap_oop"); +++} +++ +++void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1, +++ Register thread_tmp, DecoratorSet decorators) { +++ access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp); +++} +++ +++void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, +++ Register thread_tmp, DecoratorSet decorators) { +++ access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp); +++} +++ +++void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, +++ Register thread_tmp, DecoratorSet decorators) { +++ access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL, dst, src, tmp1, thread_tmp); +++} +++ +++// Used for storing NULLs. +++void MacroAssembler::store_heap_oop_null(Address dst) { +++ access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg); +++} +++ +++int MacroAssembler::corrected_idivl(Register result, Register rs1, Register rs2, +++ bool want_remainder) +++{ +++ // Full implementation of Java idiv and irem. The function +++ // returns the (pc) offset of the div instruction - may be needed +++ // for implicit exceptions. +++ // +++ // input : rs1: dividend +++ // rs2: divisor +++ // +++ // result: either +++ // quotient (= rs1 idiv rs2) +++ // remainder (= rs1 irem rs2) +++ +++ +++ int idivl_offset = offset(); +++ if (!want_remainder) { +++ divw(result, rs1, rs2); +++ } else { +++ remw(result, rs1, rs2); // result = rs1 % rs2; +++ } +++ return idivl_offset; +++} +++ +++int MacroAssembler::corrected_idivq(Register result, Register rs1, Register rs2, +++ bool want_remainder) +++{ +++ // Full implementation of Java ldiv and lrem. 
The function
+++ // returns the (pc) offset of the div instruction - may be needed
+++ // for implicit exceptions.
+++ //
+++ // input : rs1: dividend
+++ // rs2: divisor
+++ //
+++ // result: either
+++ // quotient (= rs1 idiv rs2)
+++ // remainder (= rs1 irem rs2)
+++
+++ int idivq_offset = offset();
+++ if (!want_remainder) {
+++ div(result, rs1, rs2);
+++ } else {
+++ rem(result, rs1, rs2); // result = rs1 % rs2;
+++ }
+++ return idivq_offset;
+++}
+++
+++// Look up the method for a megamorphic invokeinterface call.
+++// The target method is determined by <intf_klass, itable_index>.
+++// The receiver klass is in recv_klass.
+++// On success, the result will be in method_result, and execution falls through.
+++// On failure, execution transfers to the given label.
+++void MacroAssembler::lookup_interface_method(Register recv_klass,
+++ Register intf_klass,
+++ RegisterOrConstant itable_index,
+++ Register method_result,
+++ Register scan_tmp,
+++ Label& L_no_such_interface,
+++ bool return_method) {
+++ assert_different_registers(recv_klass, intf_klass, scan_tmp);
+++ assert_different_registers(method_result, intf_klass, scan_tmp);
+++ assert(recv_klass != method_result || !return_method,
+++ "recv_klass can be destroyed when method isn't needed");
+++ assert(itable_index.is_constant() || itable_index.as_register() == method_result,
+++ "caller must use same register for non-constant itable index as for method");
+++
+++ // Compute start of first itableOffsetEntry (which is at the end of the vtable).
+++ int vtable_base = in_bytes(Klass::vtable_start_offset());
+++ int itentry_off = itableMethodEntry::method_offset_in_bytes();
+++ int scan_step = itableOffsetEntry::size() * wordSize;
+++ int vte_size = vtableEntry::size_in_bytes();
+++ assert(vte_size == wordSize, "else adjust times_vte_scale");
+++
+++ lwu(scan_tmp, Address(recv_klass, Klass::vtable_length_offset()));
+++
+++ // %%% Could store the aligned, prescaled offset in the klassoop.
+++ shadd(scan_tmp, scan_tmp, recv_klass, scan_tmp, 3);
+++ add(scan_tmp, scan_tmp, vtable_base);
+++
+++ if (return_method) {
+++ // Adjust recv_klass by scaled itable_index, so we can free itable_index.
+++ assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
+++ if (itable_index.is_register()) {
+++ slli(t0, itable_index.as_register(), 3);
+++ } else {
+++ mv(t0, itable_index.as_constant() << 3);
+++ }
+++ add(recv_klass, recv_klass, t0);
+++ if (itentry_off) {
+++ add(recv_klass, recv_klass, itentry_off);
+++ }
+++ }
+++
+++ Label search, found_method;
+++
+++ ld(method_result, Address(scan_tmp, itableOffsetEntry::interface_offset_in_bytes()));
+++ beq(intf_klass, method_result, found_method);
+++ bind(search);
+++ // Check that the previous entry is non-null. A null entry means that
+++ // the receiver class doesn't implement the interface, and wasn't the
+++ // same as when the caller was compiled.
+++ beqz(method_result, L_no_such_interface, /* is_far */ true);
+++ addi(scan_tmp, scan_tmp, scan_step);
+++ ld(method_result, Address(scan_tmp, itableOffsetEntry::interface_offset_in_bytes()));
+++ bne(intf_klass, method_result, search);
+++
+++ bind(found_method);
+++
+++ // Got a hit.
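+++ // Sketch of the itable layout the scan above assumes (one
+++ // itableOffsetEntry per implemented interface, laid out after the
+++ // vtable):
+++ //
+++ // recv_klass + vtable_start + vtable_length * wordSize ->
+++ // [ interface_0 | offset_0 ] <- scan_step bytes per entry
+++ // [ interface_1 | offset_1 ]
+++ // [ NULL | ... ] <- null interface terminates the scan
+++ //
+++ // offset_i is the byte offset from recv_klass to that interface's
+++ // itableMethodEntry array, which the code below indexes.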
+++ if (return_method) {
+++ lwu(scan_tmp, Address(scan_tmp, itableOffsetEntry::offset_offset_in_bytes()));
+++ add(method_result, recv_klass, scan_tmp);
+++ ld(method_result, Address(method_result));
+++ }
+++}
+++
+++// virtual method calling
+++void MacroAssembler::lookup_virtual_method(Register recv_klass,
+++ RegisterOrConstant vtable_index,
+++ Register method_result) {
+++ const int base = in_bytes(Klass::vtable_start_offset());
+++ assert(vtableEntry::size() * wordSize == 8,
+++ "adjust the scaling in the code below");
+++ int vtable_offset_in_bytes = base + vtableEntry::method_offset_in_bytes();
+++
+++ if (vtable_index.is_register()) {
+++ shadd(method_result, vtable_index.as_register(), recv_klass, method_result, LogBytesPerWord);
+++ ld(method_result, Address(method_result, vtable_offset_in_bytes));
+++ } else {
+++ vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
+++ ld(method_result, form_address(method_result, recv_klass, vtable_offset_in_bytes));
+++ }
+++}
+++
+++void MacroAssembler::membar(uint32_t order_constraint) {
+++ address prev = pc() - NativeMembar::instruction_size;
+++ address last = code()->last_insn();
+++
+++ if (last != NULL && nativeInstruction_at(last)->is_membar() && prev == last) {
+++ NativeMembar *bar = NativeMembar_at(prev);
+++ // We are merging two memory barrier instructions. On RISCV we
+++ // can do this simply by ORing them together.
+++ bar->set_kind(bar->get_kind() | order_constraint);
+++ BLOCK_COMMENT("merged membar");
+++ } else {
+++ code()->set_last_insn(pc());
+++
+++ uint32_t predecessor = 0;
+++ uint32_t successor = 0;
+++
+++ membar_mask_to_pred_succ(order_constraint, predecessor, successor);
+++ fence(predecessor, successor);
+++ }
+++}
+++
+++// Form an address from base + offset in Rd. Rd may or may not
+++// actually be used: you must use the Address that is returned. It
+++// is up to you to ensure that the shift provided matches the size
+++// of your data.
+++Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset) {
+++ if (is_simm12(byte_offset)) { // 12: imm in range 2^12
+++ return Address(base, byte_offset);
+++ }
+++
+++ assert_different_registers(Rd, base, noreg);
+++
+++ // Do it the hard way
+++ mv(Rd, byte_offset);
+++ add(Rd, base, Rd);
+++ return Address(Rd);
+++}
+++
+++void MacroAssembler::check_klass_subtype(Register sub_klass,
+++ Register super_klass,
+++ Register tmp_reg,
+++ Label& L_success) {
+++ Label L_failure;
+++ check_klass_subtype_fast_path(sub_klass, super_klass, tmp_reg, &L_success, &L_failure, NULL);
+++ check_klass_subtype_slow_path(sub_klass, super_klass, tmp_reg, noreg, &L_success, NULL);
+++ bind(L_failure);
+++}
+++
+++void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod) {
+++ ld(t0, Address(xthread, JavaThread::polling_word_offset()));
+++ if (acquire) {
+++ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
+++ }
+++ if (at_return) {
+++ bgtu(in_nmethod ?
sp : fp, t0, slow_path, true /* is_far */); +++ } else { +++ test_bit(t0, t0, exact_log2(SafepointMechanism::poll_bit())); +++ bnez(t0, slow_path, true /* is_far */); +++ } +++} +++ +++void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, +++ Label &succeed, Label *fail) { +++ // oldv holds comparison value +++ // newv holds value to write in exchange +++ // addr identifies memory word to compare against/update +++ Label retry_load, nope; +++ bind(retry_load); +++ // Load reserved from the memory location +++ lr_d(tmp, addr, Assembler::aqrl); +++ // Fail and exit if it is not what we expect +++ bne(tmp, oldv, nope); +++ // If the store conditional succeeds, tmp will be zero +++ sc_d(tmp, newv, addr, Assembler::rl); +++ beqz(tmp, succeed); +++ // Retry only when the store conditional failed +++ j(retry_load); +++ +++ bind(nope); +++ membar(AnyAny); +++ mv(oldv, tmp); +++ if (fail != NULL) { +++ j(*fail); +++ } +++} +++ +++void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, +++ Label &succeed, Label *fail) { +++ assert(oopDesc::mark_offset_in_bytes() == 0, "assumption"); +++ cmpxchgptr(oldv, newv, obj, tmp, succeed, fail); +++} +++ +++void MacroAssembler::load_reserved(Register addr, +++ enum operand_size size, +++ Assembler::Aqrl acquire) { +++ switch (size) { +++ case int64: +++ lr_d(t0, addr, acquire); +++ break; +++ case int32: +++ lr_w(t0, addr, acquire); +++ break; +++ case uint32: +++ lr_w(t0, addr, acquire); +++ zero_extend(t0, t0, 32); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++} +++ +++void MacroAssembler::store_conditional(Register addr, +++ Register new_val, +++ enum operand_size size, +++ Assembler::Aqrl release) { +++ switch (size) { +++ case int64: +++ sc_d(t0, new_val, addr, release); +++ break; +++ case int32: +++ case uint32: +++ sc_w(t0, new_val, addr, release); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++} +++ +++ +++void MacroAssembler::cmpxchg_narrow_value_helper(Register addr, Register expected, +++ Register new_val, +++ enum operand_size size, +++ Register tmp1, Register tmp2, Register tmp3) { +++ assert(size == int8 || size == int16, "unsupported operand size"); +++ +++ Register aligned_addr = t1, shift = tmp1, mask = tmp2, not_mask = tmp3; +++ +++ andi(shift, addr, 3); +++ slli(shift, shift, 3); +++ +++ andi(aligned_addr, addr, ~3); +++ +++ if (size == int8) { +++ mv(mask, 0xff); +++ } else { +++ // size == int16 case +++ mv(mask, -1); +++ zero_extend(mask, mask, 16); +++ } +++ sll(mask, mask, shift); +++ +++ xori(not_mask, mask, -1); +++ +++ sll(expected, expected, shift); +++ andr(expected, expected, mask); +++ +++ sll(new_val, new_val, shift); +++ andr(new_val, new_val, mask); +++} +++ +++// cmpxchg_narrow_value will kill t0, t1, expected, new_val and tmps. +++// It's designed to implement compare and swap byte/boolean/char/short by lr.w/sc.w, +++// which are forced to work with 4-byte aligned address. 
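+++// For example (illustrative numbers): a byte-sized CAS at an address with
+++// (addr & 3) == 2 yields
+++// shift = 2 << 3 == 16
+++// mask = 0xff << 16 == 0x00ff0000
+++// not_mask = ~mask
+++// so only byte 2 of the aligned word takes part in the comparison, while
+++// the other bytes are carried over unchanged from the loaded word.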
+++void MacroAssembler::cmpxchg_narrow_value(Register addr, Register expected,
+++ Register new_val,
+++ enum operand_size size,
+++ Assembler::Aqrl acquire, Assembler::Aqrl release,
+++ Register result, bool result_as_bool,
+++ Register tmp1, Register tmp2, Register tmp3) {
+++ Register aligned_addr = t1, shift = tmp1, mask = tmp2, not_mask = tmp3, old = result, tmp = t0;
+++ assert_different_registers(addr, old, mask, not_mask, new_val, expected, shift, tmp);
+++ cmpxchg_narrow_value_helper(addr, expected, new_val, size, tmp1, tmp2, tmp3);
+++
+++ Label retry, fail, done;
+++
+++ bind(retry);
+++ lr_w(old, aligned_addr, acquire);
+++ andr(tmp, old, mask);
+++ bne(tmp, expected, fail);
+++
+++ andr(tmp, old, not_mask);
+++ orr(tmp, tmp, new_val);
+++ sc_w(tmp, tmp, aligned_addr, release);
+++ bnez(tmp, retry);
+++
+++ if (result_as_bool) {
+++ mv(result, 1);
+++ j(done);
+++
+++ bind(fail);
+++ mv(result, zr);
+++
+++ bind(done);
+++ } else {
+++ andr(tmp, old, mask);
+++
+++ bind(fail);
+++ srl(result, tmp, shift);
+++
+++ if (size == int8) {
+++ sign_extend(result, result, 8);
+++ } else {
+++ // size == int16 case
+++ sign_extend(result, result, 16);
+++ }
+++ }
+++}
+++
+++// weak_cmpxchg_narrow_value is a weak version of cmpxchg_narrow_value, to implement
+++// the weak CAS stuff. The major difference is that it simply fails when the store
+++// conditional fails.
+++void MacroAssembler::weak_cmpxchg_narrow_value(Register addr, Register expected,
+++ Register new_val,
+++ enum operand_size size,
+++ Assembler::Aqrl acquire, Assembler::Aqrl release,
+++ Register result,
+++ Register tmp1, Register tmp2, Register tmp3) {
+++ Register aligned_addr = t1, shift = tmp1, mask = tmp2, not_mask = tmp3, old = result, tmp = t0;
+++ assert_different_registers(addr, old, mask, not_mask, new_val, expected, shift, tmp);
+++ cmpxchg_narrow_value_helper(addr, expected, new_val, size, tmp1, tmp2, tmp3);
+++
+++ Label fail, done;
+++
+++ lr_w(old, aligned_addr, acquire);
+++ andr(tmp, old, mask);
+++ bne(tmp, expected, fail);
+++
+++ andr(tmp, old, not_mask);
+++ orr(tmp, tmp, new_val);
+++ sc_w(tmp, tmp, aligned_addr, release);
+++ bnez(tmp, fail);
+++
+++ // Success
+++ mv(result, 1);
+++ j(done);
+++
+++ // Fail
+++ bind(fail);
+++ mv(result, zr);
+++
+++ bind(done);
+++}
+++
+++void MacroAssembler::cmpxchg(Register addr, Register expected,
+++ Register new_val,
+++ enum operand_size size,
+++ Assembler::Aqrl acquire, Assembler::Aqrl release,
+++ Register result, bool result_as_bool) {
+++ assert(size != int8 && size != int16, "unsupported operand size");
+++
+++ Label retry_load, done, ne_done;
+++ bind(retry_load);
+++ load_reserved(addr, size, acquire);
+++ bne(t0, expected, ne_done);
+++ store_conditional(addr, new_val, size, release);
+++ bnez(t0, retry_load);
+++
+++ // equal, succeed
+++ if (result_as_bool) {
+++ mv(result, 1);
+++ } else {
+++ mv(result, expected);
+++ }
+++ j(done);
+++
+++ // not equal, failed
+++ bind(ne_done);
+++ if (result_as_bool) {
+++ mv(result, zr);
+++ } else {
+++ mv(result, t0);
+++ }
+++
+++ bind(done);
+++}
+++
+++void MacroAssembler::cmpxchg_weak(Register addr, Register expected,
+++ Register new_val,
+++ enum operand_size size,
+++ Assembler::Aqrl acquire, Assembler::Aqrl release,
+++ Register result) {
+++ Label fail, done;
+++ load_reserved(addr, size, acquire);
+++ bne(t0, expected, fail);
+++ store_conditional(addr, new_val, size, release);
+++ bnez(t0, fail);
+++
+++ // Success
+++ mv(result, 1);
+++ j(done);
+++
+++ // Fail
+++ bind(fail);
+++ mv(result,
zr); +++ +++ bind(done); +++} +++ +++#define ATOMIC_OP(NAME, AOP, ACQUIRE, RELEASE) \ +++void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \ +++ prev = prev->is_valid() ? prev : zr; \ +++ if (incr.is_register()) { \ +++ AOP(prev, addr, incr.as_register(), (Assembler::Aqrl)(ACQUIRE | RELEASE)); \ +++ } else { \ +++ mv(t0, incr.as_constant()); \ +++ AOP(prev, addr, t0, (Assembler::Aqrl)(ACQUIRE | RELEASE)); \ +++ } \ +++ return; \ +++} +++ +++ATOMIC_OP(add, amoadd_d, Assembler::relaxed, Assembler::relaxed) +++ATOMIC_OP(addw, amoadd_w, Assembler::relaxed, Assembler::relaxed) +++ATOMIC_OP(addal, amoadd_d, Assembler::aq, Assembler::rl) +++ATOMIC_OP(addalw, amoadd_w, Assembler::aq, Assembler::rl) +++ +++#undef ATOMIC_OP +++ +++#define ATOMIC_XCHG(OP, AOP, ACQUIRE, RELEASE) \ +++void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \ +++ prev = prev->is_valid() ? prev : zr; \ +++ AOP(prev, addr, newv, (Assembler::Aqrl)(ACQUIRE | RELEASE)); \ +++ return; \ +++} +++ +++ATOMIC_XCHG(xchg, amoswap_d, Assembler::relaxed, Assembler::relaxed) +++ATOMIC_XCHG(xchgw, amoswap_w, Assembler::relaxed, Assembler::relaxed) +++ATOMIC_XCHG(xchgal, amoswap_d, Assembler::aq, Assembler::rl) +++ATOMIC_XCHG(xchgalw, amoswap_w, Assembler::aq, Assembler::rl) +++ +++#undef ATOMIC_XCHG +++ +++#define ATOMIC_XCHGU(OP1, OP2) \ +++void MacroAssembler::atomic_##OP1(Register prev, Register newv, Register addr) { \ +++ atomic_##OP2(prev, newv, addr); \ +++ zero_extend(prev, prev, 32); \ +++ return; \ +++} +++ +++ATOMIC_XCHGU(xchgwu, xchgw) +++ATOMIC_XCHGU(xchgalwu, xchgalw) +++ +++#undef ATOMIC_XCHGU +++ +++void MacroAssembler::far_jump(Address entry, CodeBuffer *cbuf, Register tmp) { +++ assert(ReservedCodeCacheSize < 4*G, "branch out of range"); +++ assert(CodeCache::find_blob(entry.target()) != NULL, +++ "destination of far call not found in code cache"); +++ IncompressibleRegion ir(this); // Fixed length: see MacroAssembler::far_branch_size() +++ if (far_branches()) { +++ // We can use auipc + jalr here because we know that the total size of +++ // the code cache cannot exceed 2Gb. +++ relocate(entry.rspec(), [&] { +++ int32_t offset; +++ la_patchable(tmp, entry, offset); +++ if (cbuf != NULL) { cbuf->set_insts_mark(); } +++ jalr(x0, tmp, offset); +++ }); +++ } else { +++ if (cbuf != NULL) { cbuf->set_insts_mark(); } +++ j(entry); +++ } +++} +++ +++void MacroAssembler::far_call(Address entry, CodeBuffer *cbuf, Register tmp) { +++ assert(ReservedCodeCacheSize < 4*G, "branch out of range"); +++ assert(CodeCache::find_blob(entry.target()) != NULL, +++ "destination of far call not found in code cache"); +++ IncompressibleRegion ir(this); // Fixed length: see MacroAssembler::far_branch_size() +++ if (far_branches()) { +++ // We can use auipc + jalr here because we know that the total size of +++ // the code cache cannot exceed 2Gb. 
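+++ // Schematically, the pair emitted below is (offsets illustrative): +++ // auipc tmp, hi20(dest - pc) ; upper 20 bits, PC-relative +++ // jalr x1, lo12(dest - pc)(tmp) ; low 12 bits, links into RA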
+++ relocate(entry.rspec(), [&] { +++ int32_t offset; +++ la_patchable(tmp, entry, offset); +++ if (cbuf != NULL) { cbuf->set_insts_mark(); } +++ jalr(x1, tmp, offset); // link +++ }); +++ } else { +++ if (cbuf != NULL) { cbuf->set_insts_mark(); } +++ jal(entry); // link +++ } +++} +++ +++void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, +++ Register super_klass, +++ Register tmp_reg, +++ Label* L_success, +++ Label* L_failure, +++ Label* L_slow_path, +++ Register super_check_offset) { +++ assert_different_registers(sub_klass, super_klass, tmp_reg); +++ bool must_load_sco = (super_check_offset == noreg); +++ if (must_load_sco) { +++ assert(tmp_reg != noreg, "supply either a temp or a register offset"); +++ } else { +++ assert_different_registers(sub_klass, super_klass, super_check_offset); +++ } +++ +++ Label L_fallthrough; +++ int label_nulls = 0; +++ if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } +++ if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } +++ if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } +++ assert(label_nulls <= 1, "at most one NULL in batch"); +++ +++ int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); +++ int sco_offset = in_bytes(Klass::super_check_offset_offset()); +++ Address super_check_offset_addr(super_klass, sco_offset); +++ +++ // Hacked jmp, which may only be used just before L_fallthrough. +++#define final_jmp(label) \ +++ if (&(label) == &L_fallthrough) { /*do nothing*/ } \ +++ else j(label) /*omit semi*/ +++ +++ // If the pointers are equal, we are done (e.g., String[] elements). +++ // This self-check enables sharing of secondary supertype arrays among +++ // non-primary types such as array-of-interface. Otherwise, each such +++ // type would need its own customized SSA. +++ // We move this check to the front of the fast path because many +++ // type checks are in fact trivially successful in this manner, +++ // so we get a nicely predicted branch right at the start of the check. +++ beq(sub_klass, super_klass, *L_success); +++ +++ // Check the supertype display: +++ if (must_load_sco) { +++ lwu(tmp_reg, super_check_offset_addr); +++ super_check_offset = tmp_reg; +++ } +++ add(t0, sub_klass, super_check_offset); +++ Address super_check_addr(t0); +++ ld(t0, super_check_addr); // load displayed supertype +++ +++ // This check has worked decisively for primary supers. +++ // Secondary supers are sought in the super_cache ('super_cache_addr'). +++ // (Secondary supers are interfaces and very deeply nested subtypes.) +++ // This works in the same check above because of a tricky aliasing +++ // between the super_cache and the primary super display elements. +++ // (The 'super_check_addr' can address either, as the case requires.) +++ // Note that the cache is updated below if it does not help us find +++ // what we need immediately. +++ // So if it was a primary super, we can just fail immediately. +++ // Otherwise, it's the slow path for us (no success at this point).
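+++ // The dispatch below is, in illustrative pseudo-code: +++ // if (*super_check_addr == super_klass) goto L_success; +++ // else if (super_check_offset == sc_offset) goto L_slow_path; // cache slot, keep searching +++ // else goto L_failure; // display miss is decisive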
+++ +++ beq(super_klass, t0, *L_success); +++ mv(t1, sc_offset); +++ if (L_failure == &L_fallthrough) { +++ beq(super_check_offset, t1, *L_slow_path); +++ } else { +++ bne(super_check_offset, t1, *L_failure, /* is_far */ true); +++ final_jmp(*L_slow_path); +++ } +++ +++ bind(L_fallthrough); +++ +++#undef final_jmp +++} +++ +++// Scans count pointer-sized words at [addr] for occurrence of value, +++// generic +++void MacroAssembler::repne_scan(Register addr, Register value, Register count, +++ Register tmp) { +++ Label Lloop, Lexit; +++ beqz(count, Lexit); +++ bind(Lloop); +++ ld(tmp, addr); +++ beq(value, tmp, Lexit); +++ add(addr, addr, wordSize); +++ sub(count, count, 1); +++ bnez(count, Lloop); +++ bind(Lexit); +++} +++ +++void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, +++ Register super_klass, +++ Register tmp1_reg, +++ Register tmp2_reg, +++ Label* L_success, +++ Label* L_failure) { +++ assert_different_registers(sub_klass, super_klass, tmp1_reg); +++ if (tmp2_reg != noreg) { +++ assert_different_registers(sub_klass, super_klass, tmp1_reg, tmp2_reg, t0); +++ } +++#define IS_A_TEMP(reg) ((reg) == tmp1_reg || (reg) == tmp2_reg) +++ +++ Label L_fallthrough; +++ int label_nulls = 0; +++ if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } +++ if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } +++ +++ assert(label_nulls <= 1, "at most one NULL in the batch"); +++ +++ // A couple of useful fields in sub_klass: +++ int ss_offset = in_bytes(Klass::secondary_supers_offset()); +++ int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); +++ Address secondary_supers_addr(sub_klass, ss_offset); +++ Address super_cache_addr( sub_klass, sc_offset); +++ +++ BLOCK_COMMENT("check_klass_subtype_slow_path"); +++ +++ // Do a linear scan of the secondary super-klass chain. +++ // This code is rarely used, so simplicity is a virtue here. +++ // The repne_scan instruction uses fixed registers, which we must spill. +++ // Don't worry too much about pre-existing connections with the input regs. +++ +++ assert(sub_klass != x10, "killed reg"); // killed by mv(x10, super) +++ assert(sub_klass != x12, "killed reg"); // killed by la(x12, &pst_counter) +++ +++ RegSet pushed_registers; +++ if (!IS_A_TEMP(x12)) { +++ pushed_registers += x12; +++ } +++ if (!IS_A_TEMP(x15)) { +++ pushed_registers += x15; +++ } +++ +++ if (super_klass != x10) { +++ if (!IS_A_TEMP(x10)) { +++ pushed_registers += x10; +++ } +++ } +++ +++ push_reg(pushed_registers, sp); +++ +++ // Get super_klass value into x10 (even if it was in x15 or x12) +++ mv(x10, super_klass); +++ +++#ifndef PRODUCT +++ mv(t1, (address)&SharedRuntime::_partial_subtype_ctr); +++ Address pst_counter_addr(t1); +++ ld(t0, pst_counter_addr); +++ add(t0, t0, 1); +++ sd(t0, pst_counter_addr); +++#endif // PRODUCT +++ +++ // We will consult the secondary-super array. +++ ld(x15, secondary_supers_addr); +++ // Load the array length. +++ lwu(x12, Address(x15, Array<Klass*>::length_offset_in_bytes())); +++ // Skip to start of data. +++ add(x15, x15, Array<Klass*>::base_offset_in_bytes()); +++ +++ // Set t0 to an obvious invalid value, falling through by default +++ mv(t0, -1); +++ // Scan X12 words at [X15] for an occurrence of X10. +++ repne_scan(x15, x10, x12, t0); +++ +++ // pop will restore x10, so we should use a temp register to keep its value +++ mv(t1, x10); +++ +++ // Unspill the temp registers: +++ pop_reg(pushed_registers, sp); +++ +++ bne(t1, t0, *L_failure); +++ +++ // Success.
Cache the super we found and proceed in triumph. +++ sd(super_klass, super_cache_addr); +++ +++ if (L_success != &L_fallthrough) { +++ j(*L_success); +++ } +++ +++#undef IS_A_TEMP +++ +++ bind(L_fallthrough); +++} +++ +++// Defines obj, preserves var_size_in_bytes, okay for tmp2 == var_size_in_bytes. +++void MacroAssembler::tlab_allocate(Register obj, +++ Register var_size_in_bytes, +++ int con_size_in_bytes, +++ Register tmp1, +++ Register tmp2, +++ Label& slow_case, +++ bool is_far) { +++ BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); +++ bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, tmp1, tmp2, slow_case, is_far); +++} +++ +++// Defines obj, preserves var_size_in_bytes +++void MacroAssembler::eden_allocate(Register obj, +++ Register var_size_in_bytes, +++ int con_size_in_bytes, +++ Register tmp, +++ Label& slow_case, +++ bool is_far) { +++ BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); +++ bs->eden_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, tmp, slow_case, is_far); +++} +++ +++ +++// get_thread() can be called anywhere inside generated code so we +++// need to save whatever non-callee save context might get clobbered +++// by the call to Thread::current() or, indeed, the call setup code. +++void MacroAssembler::get_thread(Register thread) { +++ // save all call-clobbered regs except thread +++ RegSet saved_regs = RegSet::range(x5, x7) + RegSet::range(x10, x17) + +++ RegSet::range(x28, x31) + ra - thread; +++ push_reg(saved_regs, sp); +++ +++ mv(ra, CAST_FROM_FN_PTR(address, Thread::current)); +++ jalr(ra); +++ if (thread != c_rarg0) { +++ mv(thread, c_rarg0); +++ } +++ +++ // restore pushed registers +++ pop_reg(saved_regs, sp); +++} +++ +++void MacroAssembler::load_byte_map_base(Register reg) { +++ CardTable::CardValue* byte_map_base = +++ ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base(); +++ mv(reg, (uint64_t)byte_map_base); +++} +++ +++void MacroAssembler::la_patchable(Register reg1, const Address &dest, int32_t &offset) { +++ unsigned long low_address = (uintptr_t)CodeCache::low_bound(); +++ unsigned long high_address = (uintptr_t)CodeCache::high_bound(); +++ unsigned long dest_address = (uintptr_t)dest.target(); +++ long offset_low = dest_address - low_address; +++ long offset_high = dest_address - high_address; +++ +++ assert(dest.getMode() == Address::literal, "la_patchable must be applied to a literal address"); +++ assert((uintptr_t)dest.target() < (1ull << 48), "bad address"); +++ +++ // RISC-V doesn't compute a page-aligned address, in order to partially +++ // compensate for the use of *signed* offsets in its base+disp12 +++ // addressing mode (RISC-V's PC-relative reach remains asymmetric +++ // [-(2G + 2K), 2G - 2K).
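+++ // Worked example (hypothetical distance): for distance = 0x12345E78 the low +++ // 12 bits (0xE78) sign-extend to -0x188, so auipc below is handed +++ // 0x12345E78 + 0x800 (upper 20 bits: 0x12346), and the pair resolves to +++ // pc + 0x12346000 - 0x188 == pc + 0x12345E78, as required.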
+++ if (offset_high >= -((1L << 31) + (1L << 11)) && offset_low < (1L << 31) - (1L << 11)) { +++ int64_t distance = dest.target() - pc(); +++ auipc(reg1, (int32_t)distance + 0x800); +++ offset = ((int32_t)distance << 20) >> 20; +++ } else { +++ movptr(reg1, dest.target(), offset); +++ } +++} +++ +++void MacroAssembler::build_frame(int framesize) { +++ assert(framesize >= 2, "framesize must include space for FP/RA"); +++ assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); +++ sub(sp, sp, framesize); +++ sd(fp, Address(sp, framesize - 2 * wordSize)); +++ sd(ra, Address(sp, framesize - wordSize)); +++ if (PreserveFramePointer) { add(fp, sp, framesize); } +++} +++ +++void MacroAssembler::remove_frame(int framesize) { +++ assert(framesize >= 2, "framesize must include space for FP/RA"); +++ assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); +++ ld(fp, Address(sp, framesize - 2 * wordSize)); +++ ld(ra, Address(sp, framesize - wordSize)); +++ add(sp, sp, framesize); +++} +++ +++void MacroAssembler::reserved_stack_check() { +++ // testing if reserved zone needs to be enabled +++ Label no_reserved_zone_enabling; +++ +++ ld(t0, Address(xthread, JavaThread::reserved_stack_activation_offset())); +++ bltu(sp, t0, no_reserved_zone_enabling); +++ +++ enter(); // RA and FP are live. +++ mv(c_rarg0, xthread); +++ RuntimeAddress target(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone)); +++ relocate(target.rspec(), [&] { +++ int32_t offset; +++ la_patchable(t0, target, offset); +++ jalr(x1, t0, offset); +++ }); +++ leave(); +++ +++ // We have already removed our own frame. +++ // throw_delayed_StackOverflowError will think that it's been +++ // called by our caller. +++ target = RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()); +++ relocate(target.rspec(), [&] { +++ int32_t offset; +++ la_patchable(t0, target, offset); +++ jalr(x0, t0, offset); +++ }); +++ should_not_reach_here(); +++ +++ bind(no_reserved_zone_enabling); +++} +++ +++void MacroAssembler::biased_locking_enter(Register lock_reg, +++ Register obj_reg, +++ Register swap_reg, +++ Register tmp_reg, +++ bool swap_reg_contains_mark, +++ Label& done, +++ Label* slow_case, +++ BiasedLockingCounters* counters, +++ Register flag) { +++ assert(UseBiasedLocking, "why call this otherwise?"); +++ assert_different_registers(lock_reg, obj_reg, swap_reg); +++ +++ if (PrintBiasedLockingStatistics && counters == NULL) { +++ counters = BiasedLocking::counters(); +++ } +++ +++ assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, t0, flag); +++ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout"); +++ Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes()); +++ +++ // Biased locking +++ // See whether the lock is currently biased toward our thread and +++ // whether the epoch is still valid +++ // Note that the runtime guarantees sufficient alignment of JavaThread +++ // pointers to allow age to be placed into low bits +++ // First check to see whether biasing is even enabled for this object +++ Label cas_label; +++ if (!swap_reg_contains_mark) { +++ ld(swap_reg, mark_addr); +++ } +++ andi(tmp_reg, swap_reg, markWord::biased_lock_mask_in_place); +++ xori(t0, tmp_reg, (u1)markWord::biased_lock_pattern); +++ bnez(t0, cas_label); // don't care flag unless jumping to done +++ // The bias pattern is present in the object's header. 
Need to check +++ // whether the bias owner and the epoch are both still current. +++ load_prototype_header(tmp_reg, obj_reg); +++ orr(tmp_reg, tmp_reg, xthread); +++ xorr(tmp_reg, tmp_reg, swap_reg); +++ andi(tmp_reg, tmp_reg, ~((int) markWord::age_mask_in_place)); +++ if (flag->is_valid()) { +++ mv(flag, tmp_reg); +++ } +++ +++ if (counters != NULL) { +++ Label around; +++ bnez(tmp_reg, around); +++ atomic_incw(Address((address)counters->biased_lock_entry_count_addr()), tmp_reg, t0); +++ j(done); +++ bind(around); +++ } else { +++ beqz(tmp_reg, done); +++ } +++ +++ Label try_revoke_bias; +++ Label try_rebias; +++ +++ // At this point we know the header has the bias pattern and +++ // that we are not the bias owner in the current epoch. We need to +++ // figure out more details about the state of the header in order to +++ // know what operations can be legally performed on the object's +++ // header. +++ +++ // If the low three bits in the xor result aren't clear, that means +++ // the prototype header is no longer biased and we have to revoke +++ // the bias on this object. +++ andi(t0, tmp_reg, markWord::biased_lock_mask_in_place); +++ bnez(t0, try_revoke_bias); +++ +++ // Biasing is still enabled for this data type. See whether the +++ // epoch of the current bias is still valid, meaning that the epoch +++ // bits of the mark word are equal to the epoch bits of the +++ // prototype header. (Note that the prototype header's epoch bits +++ // only change at a safepoint.) If not, attempt to rebias the object +++ // toward the current thread. Note that we must be absolutely sure +++ // that the current epoch is invalid in order to do this because +++ // otherwise the manipulations it performs on the mark word are +++ // illegal. +++ andi(t0, tmp_reg, markWord::epoch_mask_in_place); +++ bnez(t0, try_rebias); +++ +++ // The epoch of the current bias is still valid but we know nothing +++ // about the owner; it might be set or it might be clear. Try to +++ // acquire the bias of the object using an atomic operation. If this +++ // fails we will go in to the runtime to revoke the object's bias. +++ // Note that we first construct the presumed unbiased header so we +++ // don't accidentally blow away another thread's valid bias. +++ { +++ Label cas_success; +++ Label counter; +++ li(t0, (int64_t)(markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place)); +++ andr(swap_reg, swap_reg, t0); +++ orr(tmp_reg, swap_reg, xthread); +++ cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, t0, cas_success, slow_case); +++ // cas failed here if slow_case == NULL +++ if (flag->is_valid()) { +++ li(flag, 1); +++ j(counter); +++ } +++ +++ // If the biasing toward our thread failed, this means that +++ // another thread succeeded in biasing it toward itself and we +++ // need to revoke that bias. The revocation will occur in the +++ // interpreter runtime in the slow case. +++ bind(cas_success); +++ if (flag->is_valid()) { +++ li(flag, 0); +++ bind(counter); +++ } +++ +++ if (counters != NULL) { +++ atomic_incw(Address((address)counters->anonymously_biased_lock_entry_count_addr()), +++ tmp_reg, t0); +++ } +++ } +++ j(done); +++ +++ bind(try_rebias); +++ // At this point we know the epoch has expired, meaning that the +++ // current "bias owner", if any, is actually invalid. Under these +++ // circumstances _only_, we are allowed to use the current header's +++ // value as the comparison value when doing the cas to acquire the +++ // bias in the current epoch.
In other words, we allow transfer of +++ // the bias from one thread to another directly in this situation. +++ // +++ // FIXME: due to a lack of registers we currently blow away the age +++ // bits in this situation. Should attempt to preserve them. +++ { +++ Label cas_success; +++ Label counter; +++ load_prototype_header(tmp_reg, obj_reg); +++ orr(tmp_reg, tmp_reg, xthread); +++ cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, t0, cas_success, slow_case); +++ // cas failed here if slow_case == NULL +++ if (flag->is_valid()) { +++ li(flag, 1); +++ j(counter); +++ } +++ +++ // If the biasing toward our thread failed, then another thread +++ // succeeded in biasing it toward itself and we need to revoke that +++ // bias. The revocation will occur in the runtime in the slow case. +++ bind(cas_success); +++ if (flag->is_valid()) { +++ li(flag, 0); +++ bind(counter); +++ } +++ +++ if (counters != NULL) { +++ atomic_incw(Address((address)counters->rebiased_lock_entry_count_addr()), +++ tmp_reg, t0); +++ } +++ } +++ j(done); +++ +++ // don't care flag unless jumping to done +++ bind(try_revoke_bias); +++ // The prototype mark in the klass doesn't have the bias bit set any +++ // more, indicating that objects of this data type are not supposed +++ // to be biased any more. We are going to try to reset the mark of +++ // this object to the prototype value and fall through to the +++ // CAS-based locking scheme. Note that if our CAS fails, it means +++ // that another thread raced us for the privilege of revoking the +++ // bias of this particular object, so it's okay to continue in the +++ // normal locking code. +++ // +++ // FIXME: due to a lack of registers we currently blow away the age +++ // bits in this situation. Should attempt to preserve them. +++ { +++ Label cas_success, nope; +++ load_prototype_header(tmp_reg, obj_reg); +++ cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, t0, cas_success, &nope); +++ bind(cas_success); +++ +++ // Fall through to the normal CAS-based lock, because no matter what +++ // the result of the above CAS, some thread must have succeeded in +++ // removing the bias bit from the object's header. +++ if (counters != NULL) { +++ atomic_incw(Address((address)counters->revoked_lock_entry_count_addr()), +++ tmp_reg, t0); +++ } +++ bind(nope); +++ } +++ +++ bind(cas_label); +++} +++ +++void MacroAssembler::biased_locking_exit(Register obj_reg, Register tmp_reg, Label& done, Register flag) { +++ assert(UseBiasedLocking, "why call this otherwise"); +++ +++ // Check for biased locking unlock case, which is a no-op +++ // Note: we do not have to check the thread ID for two reasons. +++ // First, the interpreter checks for IllegalMonitorStateException at +++ // a higher level. Second, if the bias was revoked while we held the +++ // lock, the object could not be rebiased toward another thread, so +++ // the bias bit would be clear.
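+++ // In illustrative pseudo-code, the fast-path unlock below is just: +++ // if ((mark & biased_lock_mask) == biased_lock_pattern) goto done; // no-op unlock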
+++ ld(tmp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); +++ andi(tmp_reg, tmp_reg, markWord::biased_lock_mask_in_place); +++ sub(tmp_reg, tmp_reg, markWord::biased_lock_pattern); +++ if (flag->is_valid()) { +++ mv(flag, tmp_reg); +++ } +++ beqz(tmp_reg, done); +++} +++ +++void MacroAssembler::load_prototype_header(Register dst, Register src) { +++ load_klass(dst, src); +++ ld(dst, Address(dst, Klass::prototype_header_offset())); +++} +++ +++void MacroAssembler::atomic_incw(Register counter_addr, Register tmp) { +++ Label retry_load; +++ bind(retry_load); +++ lr_w(tmp, counter_addr); +++ addw(tmp, tmp, 1); +++ sc_w(tmp, tmp, counter_addr); +++ bnez(tmp, retry_load); +++} +++ +++// Move the address of the polling page into dest. +++void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) { +++ ld(dest, Address(xthread, JavaThread::polling_page_offset())); +++} +++ +++// Read the polling page. The address of the polling page must +++// already be in r. +++void MacroAssembler::read_polling_page(Register r, int32_t offset, relocInfo::relocType rtype) { +++ relocate(rtype, [&] { +++ lwu(zr, Address(r, offset)); +++ }); +++} +++ +++void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { +++#ifdef ASSERT +++ { +++ ThreadInVMfromUnknown tiv; +++ assert (UseCompressedOops, "should only be used for compressed oops"); +++ assert (Universe::heap() != NULL, "java heap should be initialized"); +++ assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); +++ assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); +++ } +++#endif +++ int oop_index = oop_recorder()->find_index(obj); +++ relocate(oop_Relocation::spec(oop_index), [&] { +++ li32(dst, 0xDEADBEEF); +++ }); +++ zero_extend(dst, dst, 32); +++} +++ +++void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { +++ assert (UseCompressedClassPointers, "should only be used for compressed headers"); +++ assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); +++ int index = oop_recorder()->find_index(k); +++ assert(!Universe::heap()->is_in(k), "should not be an oop"); +++ +++ narrowKlass nk = CompressedKlassPointers::encode(k); +++ relocate(metadata_Relocation::spec(index), [&] { +++ li32(dst, nk); +++ }); +++ zero_extend(dst, dst, 32); +++} +++ +++// Maybe emit a call via a trampoline. If the code cache is small +++// trampolines won't be emitted. +++address MacroAssembler::trampoline_call(Address entry, CodeBuffer* cbuf) { +++ assert(JavaThread::current()->is_Compiler_thread(), "just checking"); +++ assert(entry.rspec().type() == relocInfo::runtime_call_type || +++ entry.rspec().type() == relocInfo::opt_virtual_call_type || +++ entry.rspec().type() == relocInfo::static_call_type || +++ entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type"); +++ +++ // We need a trampoline if branches are far. +++ if (far_branches()) { +++ bool in_scratch_emit_size = false; +++#ifdef COMPILER2 +++ // We don't want to emit a trampoline if C2 is generating dummy +++ // code during its branch shortening phase. 
+++ CompileTask* task = ciEnv::current()->task(); +++ in_scratch_emit_size = +++ (task != NULL && is_c2_compile(task->comp_level()) && +++ Compile::current()->output()->in_scratch_emit_size()); +++#endif +++ if (!in_scratch_emit_size) { +++ address stub = emit_trampoline_stub(offset(), entry.target()); +++ if (stub == NULL) { +++ postcond(pc() == badAddress); +++ return NULL; // CodeCache is full +++ } +++ } +++ } +++ +++ if (cbuf != NULL) { cbuf->set_insts_mark(); } +++#ifdef ASSERT +++ if (entry.rspec().type() != relocInfo::runtime_call_type) { +++ assert_alignment(pc()); +++ } +++#endif +++ relocate(entry.rspec(), [&] { +++ if (!far_branches()) { +++ jal(entry.target()); +++ } else { +++ jal(pc()); +++ } +++ }); +++ // just need to return a non-null address +++ postcond(pc() != badAddress); +++ return pc(); +++} +++ +++address MacroAssembler::ic_call(address entry, jint method_index) { +++ RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index); +++ IncompressibleRegion ir(this); // relocations +++ movptr(t1, (address)Universe::non_oop_word()); +++ assert_cond(entry != NULL); +++ return trampoline_call(Address(entry, rh)); +++} +++ +++// Emit a trampoline stub for a call to a target which is too far away. +++// +++// code sequences: +++// +++// call-site: +++// branch-and-link to <destination> or <trampoline stub> +++// +++// Related trampoline stub for this call site in the stub section: +++// load the call target from the constant pool +++// branch (RA still points to the call site above) +++ +++address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset, +++ address dest) { +++ address stub = start_a_stub(NativeInstruction::instruction_size +++ + NativeCallTrampolineStub::instruction_size); +++ if (stub == NULL) { +++ return NULL; // CodeBuffer::expand failed +++ } +++ +++ // Create a trampoline stub relocation which relates this trampoline stub +++ // with the call instruction at insts_call_instruction_offset in the +++ // instructions code-section. +++ +++ // Make sure the address of the destination is 8-byte aligned after 3 instructions. +++ align(wordSize, NativeCallTrampolineStub::data_offset); +++ +++ RelocationHolder rh = trampoline_stub_Relocation::spec(code()->insts()->start() + +++ insts_call_instruction_offset); +++ const int stub_start_offset = offset(); +++ relocate(rh, [&] { +++ // Now, create the trampoline stub's code: +++ // - load the call +++ // - call +++ Label target; +++ ld(t0, target); // auipc + ld +++ jr(t0); // jalr +++ bind(target); +++ assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset, +++ "should be"); +++ assert(offset() % wordSize == 0, "bad alignment"); +++ emit_int64((int64_t)dest); +++ }); +++ +++ const address stub_start_addr = addr_at(stub_start_offset); +++ +++ assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline"); +++ +++ end_a_stub(); +++ return stub_start_addr; +++} +++ +++Address MacroAssembler::add_memory_helper(const Address dst, Register tmp) { +++ switch (dst.getMode()) { +++ case Address::base_plus_offset: +++ // This is the expected mode, although we allow all the other +++ // forms below.
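+++ // (form_address is expected to fold the offset into a simm12 +++ // displacement when it fits, and otherwise to materialize the high +++ // part of base + offset into tmp.)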
+++ return form_address(tmp, dst.base(), dst.offset()); +++ default: +++ la(tmp, dst); +++ return Address(tmp); +++ } +++} +++ +++void MacroAssembler::increment(const Address dst, int64_t value, Register tmp1, Register tmp2) { +++ assert(((dst.getMode() == Address::base_plus_offset && +++ is_simm12(dst.offset())) || is_simm12(value)), +++ "invalid value and address mode combination"); +++ Address adr = add_memory_helper(dst, tmp2); +++ assert(!adr.uses(tmp1), "invalid dst for address increment"); +++ ld(tmp1, adr); +++ add(tmp1, tmp1, value, tmp2); +++ sd(tmp1, adr); +++} +++ +++void MacroAssembler::incrementw(const Address dst, int32_t value, Register tmp1, Register tmp2) { +++ assert(((dst.getMode() == Address::base_plus_offset && +++ is_simm12(dst.offset())) || is_simm12(value)), +++ "invalid value and address mode combination"); +++ Address adr = add_memory_helper(dst, tmp2); +++ assert(!adr.uses(tmp1), "invalid dst for address increment"); +++ lwu(tmp1, adr); +++ addw(tmp1, tmp1, value, tmp2); +++ sw(tmp1, adr); +++} +++ +++void MacroAssembler::decrement(const Address dst, int64_t value, Register tmp1, Register tmp2) { +++ assert(((dst.getMode() == Address::base_plus_offset && +++ is_simm12(dst.offset())) || is_simm12(value)), +++ "invalid value and address mode combination"); +++ Address adr = add_memory_helper(dst, tmp2); +++ assert(!adr.uses(tmp1), "invalid dst for address decrement"); +++ ld(tmp1, adr); +++ sub(tmp1, tmp1, value, tmp2); +++ sd(tmp1, adr); +++} +++ +++void MacroAssembler::decrementw(const Address dst, int32_t value, Register tmp1, Register tmp2) { +++ assert(((dst.getMode() == Address::base_plus_offset && +++ is_simm12(dst.offset())) || is_simm12(value)), +++ "invalid value and address mode combination"); +++ Address adr = add_memory_helper(dst, tmp2); +++ assert(!adr.uses(tmp1), "invalid dst for address decrement"); +++ lwu(tmp1, adr); +++ subw(tmp1, tmp1, value, tmp2); +++ sw(tmp1, adr); +++} +++ +++void MacroAssembler::cmpptr(Register src1, Address src2, Label& equal) { +++ assert_different_registers(src1, t0); +++ relocate(src2.rspec(), [&] { +++ int32_t offset; +++ la_patchable(t0, src2, offset); +++ ld(t0, Address(t0, offset)); +++ }); +++ beq(src1, t0, equal); +++} +++ +++void MacroAssembler::load_method_holder_cld(Register result, Register method) { +++ load_method_holder(result, method); +++ ld(result, Address(result, InstanceKlass::class_loader_data_offset())); +++} +++ +++void MacroAssembler::load_method_holder(Register holder, Register method) { +++ ld(holder, Address(method, Method::const_offset())); // ConstMethod* +++ ld(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* +++ ld(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass* +++} +++ +++// string indexof +++// compute index by trailing zeros +++void MacroAssembler::compute_index(Register haystack, Register trailing_zeros, +++ Register match_mask, Register result, +++ Register ch2, Register tmp, +++ bool haystack_isL) { +++ int haystack_chr_shift = haystack_isL ? 
0 : 1; +++ srl(match_mask, match_mask, trailing_zeros); +++ srli(match_mask, match_mask, 1); +++ srli(tmp, trailing_zeros, LogBitsPerByte); +++ if (!haystack_isL) andi(tmp, tmp, 0xE); +++ add(haystack, haystack, tmp); +++ ld(ch2, Address(haystack)); +++ if (!haystack_isL) srli(tmp, tmp, haystack_chr_shift); +++ add(result, result, tmp); +++} +++ +++// string indexof +++// Find pattern element in src, compute match mask, +++// only the first occurrence of 0x80/0x8000 at low bits is the valid match index +++// match mask patterns and corresponding indices would be like: +++// - 0x8080808080808080 (Latin1) +++// - 7 6 5 4 3 2 1 0 (match index) +++// - 0x8000800080008000 (UTF16) +++// - 3 2 1 0 (match index) +++void MacroAssembler::compute_match_mask(Register src, Register pattern, Register match_mask, +++ Register mask1, Register mask2) { +++ xorr(src, pattern, src); +++ sub(match_mask, src, mask1); +++ orr(src, src, mask2); +++ notr(src, src); +++ andr(match_mask, match_mask, src); +++} +++ +++#ifdef COMPILER2 +++// Code for BigInteger::mulAdd intrinsic +++// out = x10 +++// in = x11 +++// offset = x12 (already out.length-offset) +++// len = x13 +++// k = x14 +++// tmp = x28 +++// +++// pseudo code from the Java implementation: +++// long kLong = k & LONG_MASK; +++// carry = 0; +++// offset = out.length-offset - 1; +++// for (int j = len - 1; j >= 0; j--) { +++// product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry; +++// out[offset--] = (int)product; +++// carry = product >>> 32; +++// } +++// return (int)carry; +++void MacroAssembler::mul_add(Register out, Register in, Register offset, +++ Register len, Register k, Register tmp) { +++ Label L_tail_loop, L_unroll, L_end; +++ mv(tmp, out); +++ mv(out, zr); +++ blez(len, L_end); +++ zero_extend(k, k, 32); +++ slliw(t0, offset, LogBytesPerInt); +++ add(offset, tmp, t0); +++ slliw(t0, len, LogBytesPerInt); +++ add(in, in, t0); +++ +++ const int unroll = 8; +++ mv(tmp, unroll); +++ blt(len, tmp, L_tail_loop); +++ bind(L_unroll); +++ for (int i = 0; i < unroll; i++) { +++ sub(in, in, BytesPerInt); +++ lwu(t0, Address(in, 0)); +++ mul(t1, t0, k); +++ add(t0, t1, out); +++ sub(offset, offset, BytesPerInt); +++ lwu(t1, Address(offset, 0)); +++ add(t0, t0, t1); +++ sw(t0, Address(offset, 0)); +++ srli(out, t0, 32); +++ } +++ subw(len, len, tmp); +++ bge(len, tmp, L_unroll); +++ +++ bind(L_tail_loop); +++ blez(len, L_end); +++ sub(in, in, BytesPerInt); +++ lwu(t0, Address(in, 0)); +++ mul(t1, t0, k); +++ add(t0, t1, out); +++ sub(offset, offset, BytesPerInt); +++ lwu(t1, Address(offset, 0)); +++ add(t0, t0, t1); +++ sw(t0, Address(offset, 0)); +++ srli(out, t0, 32); +++ subw(len, len, 1); +++ j(L_tail_loop); +++ +++ bind(L_end); +++} +++ +++// add two unsigned inputs and output the carry +++void MacroAssembler::cad(Register dst, Register src1, Register src2, Register carry) +++{ +++ assert_different_registers(dst, carry); +++ assert_different_registers(dst, src2); +++ add(dst, src1, src2); +++ sltu(carry, dst, src2); +++} +++ +++// add two inputs with carry +++void MacroAssembler::adc(Register dst, Register src1, Register src2, Register carry) { +++ assert_different_registers(dst, carry); +++ add(dst, src1, src2); +++ add(dst, dst, carry); +++} +++ +++// add two unsigned inputs with carry and output the carry +++void MacroAssembler::cadc(Register dst, Register src1, Register src2, Register carry) { +++ assert_different_registers(dst, src2); +++ adc(dst, src1, src2, carry); +++ sltu(carry, dst, src2); +++} +++ +++void
MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo, +++ Register src1, Register src2, Register carry) { +++ cad(dest_lo, dest_lo, src1, carry); +++ add(dest_hi, dest_hi, carry); +++ cad(dest_lo, dest_lo, src2, carry); +++ add(final_dest_hi, dest_hi, carry); +++} +++ +++/** +++ * Multiply 32 bit by 32 bit first loop. +++ */ +++void MacroAssembler::multiply_32_x_32_loop(Register x, Register xstart, Register x_xstart, +++ Register y, Register y_idx, Register z, +++ Register carry, Register product, +++ Register idx, Register kdx) { +++ // jlong carry, x[], y[], z[]; +++ // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { +++ // long product = y[idx] * x[xstart] + carry; +++ // z[kdx] = (int)product; +++ // carry = product >>> 32; +++ // } +++ // z[xstart] = (int)carry; +++ +++ Label L_first_loop, L_first_loop_exit; +++ blez(idx, L_first_loop_exit); +++ +++ shadd(t0, xstart, x, t0, LogBytesPerInt); +++ lwu(x_xstart, Address(t0, 0)); +++ +++ bind(L_first_loop); +++ subw(idx, idx, 1); +++ shadd(t0, idx, y, t0, LogBytesPerInt); +++ lwu(y_idx, Address(t0, 0)); +++ mul(product, x_xstart, y_idx); +++ add(product, product, carry); +++ srli(carry, product, 32); +++ subw(kdx, kdx, 1); +++ shadd(t0, kdx, z, t0, LogBytesPerInt); +++ sw(product, Address(t0, 0)); +++ bgtz(idx, L_first_loop); +++ +++ bind(L_first_loop_exit); +++} +++ +++/** +++ * Multiply 64 bit by 64 bit first loop. +++ */ +++void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, +++ Register y, Register y_idx, Register z, +++ Register carry, Register product, +++ Register idx, Register kdx) { +++ // +++ // jlong carry, x[], y[], z[]; +++ // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { +++ // huge_128 product = y[idx] * x[xstart] + carry; +++ // z[kdx] = (jlong)product; +++ // carry = (jlong)(product >>> 64); +++ // } +++ // z[xstart] = carry; +++ // +++ +++ Label L_first_loop, L_first_loop_exit; +++ Label L_one_x, L_one_y, L_multiply; +++ +++ subw(xstart, xstart, 1); +++ bltz(xstart, L_one_x); +++ +++ shadd(t0, xstart, x, t0, LogBytesPerInt); +++ ld(x_xstart, Address(t0, 0)); +++ ror_imm(x_xstart, x_xstart, 32); // convert big-endian to little-endian +++ +++ bind(L_first_loop); +++ subw(idx, idx, 1); +++ bltz(idx, L_first_loop_exit); +++ subw(idx, idx, 1); +++ bltz(idx, L_one_y); +++ +++ shadd(t0, idx, y, t0, LogBytesPerInt); +++ ld(y_idx, Address(t0, 0)); +++ ror_imm(y_idx, y_idx, 32); // convert big-endian to little-endian +++ bind(L_multiply); +++ +++ mulhu(t0, x_xstart, y_idx); +++ mul(product, x_xstart, y_idx); +++ cad(product, product, carry, t1); +++ adc(carry, t0, zr, t1); +++ +++ subw(kdx, kdx, 2); +++ ror_imm(product, product, 32); // back to big-endian +++ shadd(t0, kdx, z, t0, LogBytesPerInt); +++ sd(product, Address(t0, 0)); +++ +++ j(L_first_loop); +++ +++ bind(L_one_y); +++ lwu(y_idx, Address(y, 0)); +++ j(L_multiply); +++ +++ bind(L_one_x); +++ lwu(x_xstart, Address(x, 0)); +++ j(L_first_loop); +++ +++ bind(L_first_loop_exit); +++} +++ +++/** +++ * Multiply 128 bit by 128 bit. Unrolled inner loop. 
+++ * +++ */ +++void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, +++ Register carry, Register carry2, +++ Register idx, Register jdx, +++ Register yz_idx1, Register yz_idx2, +++ Register tmp, Register tmp3, Register tmp4, +++ Register tmp6, Register product_hi) { +++ // jlong carry, x[], y[], z[]; +++ // int kdx = xstart+1; +++ // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop +++ // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; +++ // jlong carry2 = (jlong)(tmp3 >>> 64); +++ // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; +++ // carry = (jlong)(tmp4 >>> 64); +++ // z[kdx+idx+1] = (jlong)tmp3; +++ // z[kdx+idx] = (jlong)tmp4; +++ // } +++ // idx += 2; +++ // if (idx > 0) { +++ // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; +++ // z[kdx+idx] = (jlong)yz_idx1; +++ // carry = (jlong)(yz_idx1 >>> 64); +++ // } +++ // +++ +++ Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; +++ +++ srliw(jdx, idx, 2); +++ +++ bind(L_third_loop); +++ +++ subw(jdx, jdx, 1); +++ bltz(jdx, L_third_loop_exit); +++ subw(idx, idx, 4); +++ +++ shadd(t0, idx, y, t0, LogBytesPerInt); +++ ld(yz_idx2, Address(t0, 0)); +++ ld(yz_idx1, Address(t0, wordSize)); +++ +++ shadd(tmp6, idx, z, t0, LogBytesPerInt); +++ +++ ror_imm(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian +++ ror_imm(yz_idx2, yz_idx2, 32); +++ +++ ld(t1, Address(tmp6, 0)); +++ ld(t0, Address(tmp6, wordSize)); +++ +++ mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 +++ mulhu(tmp4, product_hi, yz_idx1); +++ +++ ror_imm(t0, t0, 32, tmp); // convert big-endian to little-endian +++ ror_imm(t1, t1, 32, tmp); +++ +++ mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp +++ mulhu(carry2, product_hi, yz_idx2); +++ +++ cad(tmp3, tmp3, carry, carry); +++ adc(tmp4, tmp4, zr, carry); +++ cad(tmp3, tmp3, t0, t0); +++ cadc(tmp4, tmp4, tmp, t0); +++ adc(carry, carry2, zr, t0); +++ cad(tmp4, tmp4, t1, carry2); +++ adc(carry, carry, zr, carry2); +++ +++ ror_imm(tmp3, tmp3, 32); // convert little-endian to big-endian +++ ror_imm(tmp4, tmp4, 32); +++ sd(tmp4, Address(tmp6, 0)); +++ sd(tmp3, Address(tmp6, wordSize)); +++ +++ j(L_third_loop); +++ +++ bind(L_third_loop_exit); +++ +++ andi(idx, idx, 0x3); +++ beqz(idx, L_post_third_loop_done); +++ +++ Label L_check_1; +++ subw(idx, idx, 2); +++ bltz(idx, L_check_1); +++ +++ shadd(t0, idx, y, t0, LogBytesPerInt); +++ ld(yz_idx1, Address(t0, 0)); +++ ror_imm(yz_idx1, yz_idx1, 32); +++ +++ mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 +++ mulhu(tmp4, product_hi, yz_idx1); +++ +++ shadd(t0, idx, z, t0, LogBytesPerInt); +++ ld(yz_idx2, Address(t0, 0)); +++ ror_imm(yz_idx2, yz_idx2, 32, tmp); +++ +++ add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2, tmp); +++ +++ ror_imm(tmp3, tmp3, 32, tmp); +++ sd(tmp3, Address(t0, 0)); +++ +++ bind(L_check_1); +++ +++ andi(idx, idx, 0x1); +++ subw(idx, idx, 1); +++ bltz(idx, L_post_third_loop_done); +++ shadd(t0, idx, y, t0, LogBytesPerInt); +++ lwu(tmp4, Address(t0, 0)); +++ mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 +++ mulhu(carry2, tmp4, product_hi); +++ +++ shadd(t0, idx, z, t0, LogBytesPerInt); +++ lwu(tmp4, Address(t0, 0)); +++ +++ add2_with_carry(carry2, carry2, tmp3, tmp4, carry, t0); +++ +++ shadd(t0, idx, z, t0, LogBytesPerInt); +++ sw(tmp3, Address(t0, 0)); +++ +++ slli(t0, carry2, 32); +++ srli(carry, tmp3, 32); +++ orr(carry, carry, t0); +++ +++ bind(L_post_third_loop_done); +++} +++ +++/** +++ * Code for 
BigInteger::multiplyToLen() intrinsic. +++ * +++ * x10: x +++ * x11: xlen +++ * x12: y +++ * x13: ylen +++ * x14: z +++ * x15: zlen +++ * x16: tmp1 +++ * x17: tmp2 +++ * x7: tmp3 +++ * x28: tmp4 +++ * x29: tmp5 +++ * x30: tmp6 +++ * x31: tmp7 +++ */ +++void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, +++ Register z, Register zlen, +++ Register tmp1, Register tmp2, Register tmp3, Register tmp4, +++ Register tmp5, Register tmp6, Register product_hi) { +++ assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6); +++ +++ const Register idx = tmp1; +++ const Register kdx = tmp2; +++ const Register xstart = tmp3; +++ +++ const Register y_idx = tmp4; +++ const Register carry = tmp5; +++ const Register product = xlen; +++ const Register x_xstart = zlen; // reuse register +++ +++ mv(idx, ylen); // idx = ylen; +++ mv(kdx, zlen); // kdx = xlen+ylen; +++ mv(carry, zr); // carry = 0; +++ +++ Label L_multiply_64_x_64_loop, L_done; +++ +++ subw(xstart, xlen, 1); +++ bltz(xstart, L_done); +++ +++ const Register jdx = tmp1; +++ +++ if (AvoidUnalignedAccesses) { +++ // Check if x and y are both 8-byte aligned. +++ orr(t0, xlen, ylen); +++ test_bit(t0, t0, 0); +++ beqz(t0, L_multiply_64_x_64_loop); +++ +++ multiply_32_x_32_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); +++ shadd(t0, xstart, z, t0, LogBytesPerInt); +++ sw(carry, Address(t0, 0)); +++ +++ Label L_second_loop_unaligned; +++ bind(L_second_loop_unaligned); +++ mv(carry, zr); +++ mv(jdx, ylen); +++ subw(xstart, xstart, 1); +++ bltz(xstart, L_done); +++ sub(sp, sp, 2 * wordSize); +++ sd(z, Address(sp, 0)); +++ sd(zr, Address(sp, wordSize)); +++ shadd(t0, xstart, z, t0, LogBytesPerInt); +++ addi(z, t0, 4); +++ shadd(t0, xstart, x, t0, LogBytesPerInt); +++ lwu(product, Address(t0, 0)); +++ Label L_third_loop, L_third_loop_exit; +++ +++ blez(jdx, L_third_loop_exit); +++ +++ bind(L_third_loop); +++ subw(jdx, jdx, 1); +++ shadd(t0, jdx, y, t0, LogBytesPerInt); +++ lwu(t0, Address(t0, 0)); +++ mul(t1, t0, product); +++ add(t0, t1, carry); +++ shadd(tmp6, jdx, z, t1, LogBytesPerInt); +++ lwu(t1, Address(tmp6, 0)); +++ add(t0, t0, t1); +++ sw(t0, Address(tmp6, 0)); +++ srli(carry, t0, 32); +++ bgtz(jdx, L_third_loop); +++ +++ bind(L_third_loop_exit); +++ ld(z, Address(sp, 0)); +++ addi(sp, sp, 2 * wordSize); +++ shadd(t0, xstart, z, t0, LogBytesPerInt); +++ sw(carry, Address(t0, 0)); +++ +++ j(L_second_loop_unaligned); +++ } +++ +++ bind(L_multiply_64_x_64_loop); +++ multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); +++ +++ Label L_second_loop_aligned; +++ beqz(kdx, L_second_loop_aligned); +++ +++ Label L_carry; +++ subw(kdx, kdx, 1); +++ beqz(kdx, L_carry); +++ +++ shadd(t0, kdx, z, t0, LogBytesPerInt); +++ sw(carry, Address(t0, 0)); +++ srli(carry, carry, 32); +++ subw(kdx, kdx, 1); +++ +++ bind(L_carry); +++ shadd(t0, kdx, z, t0, LogBytesPerInt); +++ sw(carry, Address(t0, 0)); +++ +++ // Second and third (nested) loops. 
+++ // +++ // for (int i = xstart-1; i >= 0; i--) { // Second loop +++ // carry = 0; +++ // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop +++ // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + +++ // (z[k] & LONG_MASK) + carry; +++ // z[k] = (int)product; +++ // carry = product >>> 32; +++ // } +++ // z[i] = (int)carry; +++ // } +++ // +++ // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi +++ +++ bind(L_second_loop_aligned); +++ mv(carry, zr); // carry = 0; +++ mv(jdx, ylen); // j = ystart+1 +++ +++ subw(xstart, xstart, 1); // i = xstart-1; +++ bltz(xstart, L_done); +++ +++ sub(sp, sp, 4 * wordSize); +++ sd(z, Address(sp, 0)); +++ +++ Label L_last_x; +++ shadd(t0, xstart, z, t0, LogBytesPerInt); +++ addi(z, t0, 4); +++ subw(xstart, xstart, 1); // i = xstart-1; +++ bltz(xstart, L_last_x); +++ +++ shadd(t0, xstart, x, t0, LogBytesPerInt); +++ ld(product_hi, Address(t0, 0)); +++ ror_imm(product_hi, product_hi, 32); // convert big-endian to little-endian +++ +++ Label L_third_loop_prologue; +++ bind(L_third_loop_prologue); +++ +++ sd(ylen, Address(sp, wordSize)); +++ sd(x, Address(sp, 2 * wordSize)); +++ sd(xstart, Address(sp, 3 * wordSize)); +++ multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, +++ tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); +++ ld(z, Address(sp, 0)); +++ ld(ylen, Address(sp, wordSize)); +++ ld(x, Address(sp, 2 * wordSize)); +++ ld(xlen, Address(sp, 3 * wordSize)); // copy old xstart -> xlen +++ addi(sp, sp, 4 * wordSize); +++ +++ addiw(tmp3, xlen, 1); +++ shadd(t0, tmp3, z, t0, LogBytesPerInt); +++ sw(carry, Address(t0, 0)); +++ +++ subw(tmp3, tmp3, 1); +++ bltz(tmp3, L_done); +++ +++ srli(carry, carry, 32); +++ shadd(t0, tmp3, z, t0, LogBytesPerInt); +++ sw(carry, Address(t0, 0)); +++ j(L_second_loop_aligned); +++ +++ // Next infrequent code is moved outside loops. +++ bind(L_last_x); +++ lwu(product_hi, Address(x, 0)); +++ j(L_third_loop_prologue); +++ +++ bind(L_done); +++} +++#endif +++ +++// Count bits of trailing zero chars from lsb to msb until first non-zero element. +++// For LL case, one byte for one element, so shift 8 bits once, and for other case, +++// shift 16 bits once. +++void MacroAssembler::ctzc_bit(Register Rd, Register Rs, bool isLL, Register tmp1, Register tmp2) { +++ if (UseZbb) { +++ assert_different_registers(Rd, Rs, tmp1); +++ int step = isLL ? 8 : 16; +++ ctz(Rd, Rs); +++ andi(tmp1, Rd, step - 1); +++ sub(Rd, Rd, tmp1); +++ return; +++ } +++ +++ assert_different_registers(Rd, Rs, tmp1, tmp2); +++ Label Loop; +++ int step = isLL ? 
8 : 16; +++ mv(Rd, -step); +++ mv(tmp2, Rs); +++ +++ bind(Loop); +++ addi(Rd, Rd, step); +++ andi(tmp1, tmp2, ((1 << step) - 1)); +++ srli(tmp2, tmp2, step); +++ beqz(tmp1, Loop); +++} +++ +++// This instruction reads adjacent 4 bytes from the lower half of source register, +++// inflate into a register, for example: +++// Rs: A7A6A5A4A3A2A1A0 +++// Rd: 00A300A200A100A0 +++void MacroAssembler::inflate_lo32(Register Rd, Register Rs, Register tmp1, Register tmp2) { +++ assert_different_registers(Rd, Rs, tmp1, tmp2); +++ +++ mv(tmp1, 0xFF); +++ mv(Rd, zr); +++ for (int i = 0; i <= 3; i++) { +++ andr(tmp2, Rs, tmp1); +++ if (i) { +++ slli(tmp2, tmp2, i * 8); +++ } +++ orr(Rd, Rd, tmp2); +++ if (i != 3) { +++ slli(tmp1, tmp1, 8); +++ } +++ } +++} +++ +++// This instruction reads adjacent 4 bytes from the upper half of source register, +++// inflate into a register, for example: +++// Rs: A7A6A5A4A3A2A1A0 +++// Rd: 00A700A600A500A4 +++void MacroAssembler::inflate_hi32(Register Rd, Register Rs, Register tmp1, Register tmp2) { +++ assert_different_registers(Rd, Rs, tmp1, tmp2); +++ +++ mv(tmp1, 0xFF00000000); +++ mv(Rd, zr); +++ for (int i = 0; i <= 3; i++) { +++ andr(tmp2, Rs, tmp1); +++ orr(Rd, Rd, tmp2); +++ srli(Rd, Rd, 8); +++ if (i != 3) { +++ slli(tmp1, tmp1, 8); +++ } +++ } +++} +++ +++// The size of the blocks erased by the zero_blocks stub. We must +++// handle anything smaller than this ourselves in zero_words(). +++const int MacroAssembler::zero_words_block_size = 8; +++ +++// zero_words() is used by C2 ClearArray patterns. It is as small as +++// possible, handling small word counts locally and delegating +++// anything larger to the zero_blocks stub. It is expanded many times +++// in compiled code, so it is important to keep it short. +++ +++// ptr: Address of a buffer to be zeroed. +++// cnt: Count in HeapWords. +++// +++// ptr, cnt, and t0 are clobbered. +++address MacroAssembler::zero_words(Register ptr, Register cnt) { +++ assert(is_power_of_2(zero_words_block_size), "adjust this"); +++ assert(ptr == x28 && cnt == x29, "mismatch in register usage"); +++ assert_different_registers(cnt, t0); +++ +++ BLOCK_COMMENT("zero_words {"); +++ +++ mv(t0, zero_words_block_size); +++ Label around, done, done16; +++ bltu(cnt, t0, around); +++ { +++ RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::riscv::zero_blocks()); +++ assert(zero_blocks.target() != NULL, "zero_blocks stub has not been generated"); +++ if (StubRoutines::riscv::complete()) { +++ address tpc = trampoline_call(zero_blocks); +++ if (tpc == NULL) { +++ DEBUG_ONLY(reset_labels(around)); +++ postcond(pc() == badAddress); +++ return NULL; +++ } +++ } else { +++ jal(zero_blocks); +++ } +++ } +++ bind(around); +++ for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) { +++ Label l; +++ test_bit(t0, cnt, exact_log2(i)); +++ beqz(t0, l); +++ for (int j = 0; j < i; j++) { +++ sd(zr, Address(ptr, 0)); +++ addi(ptr, ptr, 8); +++ } +++ bind(l); +++ } +++ { +++ Label l; +++ test_bit(t0, cnt, 0); +++ beqz(t0, l); +++ sd(zr, Address(ptr, 0)); +++ bind(l); +++ } +++ +++ BLOCK_COMMENT("} zero_words"); +++ postcond(pc() != badAddress); +++ return pc(); +++} +++ +++#define SmallArraySize (18 * BytesPerLong) +++ +++// base: Address of a buffer to be zeroed, 8 bytes aligned. +++// cnt: Immediate count in HeapWords. 
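+++// For example (hypothetical counts): zero_words(x28, 16) expands to sixteen +++// inline sd(zr, ...) stores, while zero_words(x28, 24) exceeds SmallArraySize +++// (18 words) and takes the 8-way unrolled loop below.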
+++void MacroAssembler::zero_words(Register base, u_int64_t cnt) { +++ assert_different_registers(base, t0, t1); +++ +++ BLOCK_COMMENT("zero_words {"); +++ +++ if (cnt <= SmallArraySize / BytesPerLong) { +++ for (int i = 0; i < (int)cnt; i++) { +++ sd(zr, Address(base, i * wordSize)); +++ } +++ } else { +++ const int unroll = 8; // Number of sd(zr, adr) instructions we'll unroll +++ int remainder = cnt % unroll; +++ for (int i = 0; i < remainder; i++) { +++ sd(zr, Address(base, i * wordSize)); +++ } +++ +++ Label loop; +++ Register cnt_reg = t0; +++ Register loop_base = t1; +++ cnt = cnt - remainder; +++ mv(cnt_reg, cnt); +++ add(loop_base, base, remainder * wordSize); +++ bind(loop); +++ sub(cnt_reg, cnt_reg, unroll); +++ for (int i = 0; i < unroll; i++) { +++ sd(zr, Address(loop_base, i * wordSize)); +++ } +++ add(loop_base, loop_base, unroll * wordSize); +++ bnez(cnt_reg, loop); +++ } +++ +++ BLOCK_COMMENT("} zero_words"); +++} +++ +++// base: Address of a buffer to be filled, 8 bytes aligned. +++// cnt: Count in 8-byte unit. +++// value: Value to be filled with. +++// base will point to the end of the buffer after filling. +++void MacroAssembler::fill_words(Register base, Register cnt, Register value) { +++// Algorithm: +++// +++// t0 = cnt & 7 +++// cnt -= t0 +++// p += t0 +++// switch (t0): +++// switch start: +++// do while cnt +++// cnt -= 8 +++// p[-8] = value +++// case 7: +++// p[-7] = value +++// case 6: +++// p[-6] = value +++// // ... +++// case 1: +++// p[-1] = value +++// case 0: +++// p += 8 +++// do-while end +++// switch end +++ +++ assert_different_registers(base, cnt, value, t0, t1); +++ +++ Label fini, skip, entry, loop; +++ const int unroll = 8; // Number of sd instructions we'll unroll +++ +++ beqz(cnt, fini); +++ +++ andi(t0, cnt, unroll - 1); +++ sub(cnt, cnt, t0); +++ // Store the first cnt % 8 words via a computed jump into the unrolled +++ // loop below; after that, each loop iteration stores 8 words. +++ shadd(base, t0, base, t1, 3); +++ la(t1, entry); +++ slli(t0, t0, 2); // t0 = (cnt % 8) * 4: each sd is 4 bytes, so jump to entry - (cnt % 8) * 4 +++ sub(t1, t1, t0); +++ jr(t1); +++ +++ bind(loop); +++ add(base, base, unroll * 8); +++ for (int i = -unroll; i < 0; i++) { +++ sd(value, Address(base, i * 8)); +++ } +++ bind(entry); +++ sub(cnt, cnt, unroll); +++ bgez(cnt, loop); +++ +++ bind(fini); +++} +++ +++#define FCVT_SAFE(FLOATCVT, FLOATSIG) \ +++void MacroAssembler::FLOATCVT##_safe(Register dst, FloatRegister src, Register tmp) { \ +++ Label done; \ +++ assert_different_registers(dst, tmp); \ +++ fclass_##FLOATSIG(tmp, src); \ +++ mv(dst, zr); \ +++ /* check if src is NaN */ \ +++ andi(tmp, tmp, 0b1100000000); \ +++ bnez(tmp, done); \ +++ FLOATCVT(dst, src); \ +++ bind(done); \ +++} +++ +++FCVT_SAFE(fcvt_w_s, s); +++FCVT_SAFE(fcvt_l_s, s); +++FCVT_SAFE(fcvt_w_d, d); +++FCVT_SAFE(fcvt_l_d, d); +++ +++#undef FCVT_SAFE +++ +++#define FCMP(FLOATTYPE, FLOATSIG) \ +++void MacroAssembler::FLOATTYPE##_compare(Register result, FloatRegister Rs1, \ +++ FloatRegister Rs2, int unordered_result) { \ +++ Label Ldone; \ +++ if (unordered_result < 0) { \ +++ /* we want -1 for unordered or less than, 0 for equal and 1 for greater than. */ \ +++ /* installs 1 if gt else 0 */ \ +++ flt_##FLOATSIG(result, Rs2, Rs1); \ +++ /* Rs1 > Rs2, install 1 */ \ +++ bgtz(result, Ldone); \ +++ feq_##FLOATSIG(result, Rs1, Rs2); \ +++ addi(result, result, -1); \ +++ /* Rs1 = Rs2, install 0 */ \ +++ /* NaN or Rs1 < Rs2, install -1 */ \ +++ bind(Ldone); \ +++ } else { \ +++ /* we want -1 for less than, 0 for equal and 1 for unordered or greater than.
*/ \ +++ /* installs 1 if gt or unordered else 0 */ \ +++ flt_##FLOATSIG(result, Rs1, Rs2); \ +++ /* Rs1 < Rs2, install -1 */ \ +++ bgtz(result, Ldone); \ +++ feq_##FLOATSIG(result, Rs1, Rs2); \ +++ addi(result, result, -1); \ +++ /* Rs1 = Rs2, install 0 */ \ +++ /* NaN or Rs1 > Rs2, install 1 */ \ +++ bind(Ldone); \ +++ neg(result, result); \ +++ } \ +++} +++ +++FCMP(float, s); +++FCMP(double, d); +++ +++#undef FCMP +++ +++// Zero words; len is in bytes +++// Destroys all registers except addr +++// len must be a nonzero multiple of wordSize +++void MacroAssembler::zero_memory(Register addr, Register len, Register tmp) { +++ assert_different_registers(addr, len, tmp, t0, t1); +++ +++#ifdef ASSERT +++ { +++ Label L; +++ andi(t0, len, BytesPerWord - 1); +++ beqz(t0, L); +++ stop("len is not a multiple of BytesPerWord"); +++ bind(L); +++ } +++#endif // ASSERT +++ +++#ifndef PRODUCT +++ block_comment("zero memory"); +++#endif // PRODUCT +++ +++ Label loop; +++ Label entry; +++ +++ // Algorithm: +++ // +++ // t0 = cnt & 7 +++ // cnt -= t0 +++ // p += t0 +++ // switch (t0) { +++ // do { +++ // cnt -= 8 +++ // p[-8] = 0 +++ // case 7: +++ // p[-7] = 0 +++ // case 6: +++ // p[-6] = 0 +++ // ... +++ // case 1: +++ // p[-1] = 0 +++ // case 0: +++ // p += 8 +++ // } while (cnt) +++ // } +++ +++ const int unroll = 8; // Number of sd(zr) instructions we'll unroll +++ +++ srli(len, len, LogBytesPerWord); +++ andi(t0, len, unroll - 1); // t0 = cnt % unroll +++ sub(len, len, t0); // cnt -= unroll +++ // tmp always points to the end of the region we're about to zero +++ shadd(tmp, t0, addr, t1, LogBytesPerWord); +++ la(t1, entry); +++ slli(t0, t0, 2); +++ sub(t1, t1, t0); +++ jr(t1); +++ bind(loop); +++ sub(len, len, unroll); +++ for (int i = -unroll; i < 0; i++) { +++ sd(zr, Address(tmp, i * wordSize)); +++ } +++ bind(entry); +++ add(tmp, tmp, unroll * wordSize); +++ bnez(len, loop); +++} +++ +++// shift left by shamt and add +++// Rd = (Rs1 << shamt) + Rs2 +++void MacroAssembler::shadd(Register Rd, Register Rs1, Register Rs2, Register tmp, int shamt) { +++ if (UseZba) { +++ if (shamt == 1) { +++ sh1add(Rd, Rs1, Rs2); +++ return; +++ } else if (shamt == 2) { +++ sh2add(Rd, Rs1, Rs2); +++ return; +++ } else if (shamt == 3) { +++ sh3add(Rd, Rs1, Rs2); +++ return; +++ } +++ } +++ +++ if (shamt != 0) { +++ slli(tmp, Rs1, shamt); +++ add(Rd, Rs2, tmp); +++ } else { +++ add(Rd, Rs1, Rs2); +++ } +++} +++ +++void MacroAssembler::zero_extend(Register dst, Register src, int bits) { +++ if (UseZba && bits == 32) { +++ zext_w(dst, src); +++ return; +++ } +++ +++ if (UseZbb && bits == 16) { +++ zext_h(dst, src); +++ return; +++ } +++ +++ if (bits == 8) { +++ zext_b(dst, src); +++ } else { +++ slli(dst, src, XLEN - bits); +++ srli(dst, dst, XLEN - bits); +++ } +++} +++ +++void MacroAssembler::sign_extend(Register dst, Register src, int bits) { +++ if (UseZbb) { +++ if (bits == 8) { +++ sext_b(dst, src); +++ return; +++ } else if (bits == 16) { +++ sext_h(dst, src); +++ return; +++ } +++ } +++ +++ if (bits == 32) { +++ sext_w(dst, src); +++ } else { +++ slli(dst, src, XLEN - bits); +++ srai(dst, dst, XLEN - bits); +++ } +++} +++ +++void MacroAssembler::cmp_l2i(Register dst, Register src1, Register src2, Register tmp) +++{ +++ if (src1 == src2) { +++ mv(dst, zr); +++ return; +++ } +++ Label done; +++ Register left = src1; +++ Register right = src2; +++ if (dst == src1) { +++ assert_different_registers(dst, src2, tmp); +++ mv(tmp, src1); +++ left = tmp; +++ } else if (dst == src2) { +++ assert_different_registers(dst, 
src1, tmp); +++ mv(tmp, src2); +++ right = tmp; +++ } +++ +++ // installs 1 if gt else 0 +++ slt(dst, right, left); +++ bnez(dst, done); +++ slt(dst, left, right); +++ // dst = -1 if lt; else if eq , dst = 0 +++ neg(dst, dst); +++ bind(done); +++} +++ +++void MacroAssembler::test_bit(Register Rd, Register Rs, uint32_t bit_pos, Register tmp) { +++ assert(bit_pos < 64, "invalid bit range"); +++ if (UseZbs) { +++ bexti(Rd, Rs, bit_pos); +++ return; +++ } +++ andi(Rd, Rs, 1UL << bit_pos, tmp); +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp ++@@ -0,0 +1,1310 @@ +++/* +++ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_MACROASSEMBLER_RISCV_HPP +++#define CPU_RISCV_MACROASSEMBLER_RISCV_HPP +++ +++#include "asm/assembler.inline.hpp" +++#include "metaprogramming/enableIf.hpp" +++#include "nativeInst_riscv.hpp" +++#include "oops/compressedOops.hpp" +++#include "utilities/powerOfTwo.hpp" +++ +++// MacroAssembler extends Assembler by frequently used macros. +++// +++// Instructions for which a 'better' code sequence exists depending +++// on arguments should also go in here. +++ +++class MacroAssembler: public Assembler { +++ +++ public: +++ MacroAssembler(CodeBuffer* code) : Assembler(code) {} +++ +++ virtual ~MacroAssembler() {} +++ +++ void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod); +++ +++ // Alignment +++ void align(int modulus, int extra_offset = 0); +++ +++ static inline void assert_alignment(address pc, int alignment = NativeInstruction::instruction_size) { +++ assert(is_aligned(pc, alignment), "bad alignment"); +++ } +++ +++ // Stack frame creation/removal +++ // Note that SP must be updated to the right place before saving/restoring RA and FP +++ // because signal based thread suspend/resume could happen asynchronously. +++ void enter() { +++ addi(sp, sp, - 2 * wordSize); +++ sd(ra, Address(sp, wordSize)); +++ sd(fp, Address(sp)); +++ addi(fp, sp, 2 * wordSize); +++ } +++ +++ void leave() { +++ addi(sp, fp, - 2 * wordSize); +++ ld(fp, Address(sp)); +++ ld(ra, Address(sp, wordSize)); +++ addi(sp, sp, 2 * wordSize); +++ } +++ +++ +++ // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information) +++ // The pointer will be loaded into the thread register. 
+++ void get_thread(Register thread); +++ +++ // Support for VM calls +++ // +++ // It is imperative that all calls into the VM are handled via the call_VM macros. +++ // They make sure that the stack linkage is setup correctly. call_VM's correspond +++ // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points. +++ +++ void call_VM(Register oop_result, +++ address entry_point, +++ bool check_exceptions = true); +++ void call_VM(Register oop_result, +++ address entry_point, +++ Register arg_1, +++ bool check_exceptions = true); +++ void call_VM(Register oop_result, +++ address entry_point, +++ Register arg_1, Register arg_2, +++ bool check_exceptions = true); +++ void call_VM(Register oop_result, +++ address entry_point, +++ Register arg_1, Register arg_2, Register arg_3, +++ bool check_exceptions = true); +++ +++ // Overloadings with last_Java_sp +++ void call_VM(Register oop_result, +++ Register last_java_sp, +++ address entry_point, +++ int number_of_arguments = 0, +++ bool check_exceptions = true); +++ void call_VM(Register oop_result, +++ Register last_java_sp, +++ address entry_point, +++ Register arg_1, +++ bool check_exceptions = true); +++ void call_VM(Register oop_result, +++ Register last_java_sp, +++ address entry_point, +++ Register arg_1, Register arg_2, +++ bool check_exceptions = true); +++ void call_VM(Register oop_result, +++ Register last_java_sp, +++ address entry_point, +++ Register arg_1, Register arg_2, Register arg_3, +++ bool check_exceptions = true); +++ +++ void get_vm_result(Register oop_result, Register java_thread); +++ void get_vm_result_2(Register metadata_result, Register java_thread); +++ +++ // These always tightly bind to MacroAssembler::call_VM_leaf_base +++ // bypassing the virtual implementation +++ void call_VM_leaf(address entry_point, +++ int number_of_arguments = 0); +++ void call_VM_leaf(address entry_point, +++ Register arg_0); +++ void call_VM_leaf(address entry_point, +++ Register arg_0, Register arg_1); +++ void call_VM_leaf(address entry_point, +++ Register arg_0, Register arg_1, Register arg_2); +++ +++ // These always tightly bind to MacroAssembler::call_VM_base +++ // bypassing the virtual implementation +++ void super_call_VM_leaf(address entry_point, Register arg_0); +++ void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1); +++ void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2); +++ void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3); +++ +++ // last Java Frame (fills frame anchor) +++ void set_last_Java_frame(Register last_java_sp, Register last_java_fp, address last_java_pc, Register tmp); +++ void set_last_Java_frame(Register last_java_sp, Register last_java_fp, Label &last_java_pc, Register tmp); +++ void set_last_Java_frame(Register last_java_sp, Register last_java_fp, Register last_java_pc, Register tmp); +++ +++ // thread in the default location (xthread) +++ void reset_last_Java_frame(bool clear_fp); +++ +++ virtual void call_VM_leaf_base( +++ address entry_point, // the entry point +++ int number_of_arguments, // the number of arguments to pop after the call +++ Label* retaddr = NULL +++ ); +++ +++ virtual void call_VM_leaf_base( +++ address entry_point, // the entry point +++ int number_of_arguments, // the number of arguments to pop after the call +++ Label& retaddr) { +++ call_VM_leaf_base(entry_point, number_of_arguments, &retaddr); +++ } +++ +++ virtual void call_VM_base( // 
returns the register containing the thread upon return
+++    Register oop_result,          // where an oop-result ends up if any; use noreg otherwise
+++    Register java_thread,         // the thread if computed before; use noreg otherwise
+++    Register last_java_sp,        // to set up last_Java_frame in stubs; use noreg otherwise
+++    address entry_point,          // the entry point
+++    int number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
+++    bool check_exceptions         // whether to check for pending exceptions after return
+++  );
+++
+++  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
+++
+++  virtual void check_and_handle_earlyret(Register java_thread);
+++  virtual void check_and_handle_popframe(Register java_thread);
+++
+++  void resolve_weak_handle(Register result, Register tmp);
+++  void resolve_oop_handle(Register result, Register tmp = x15);
+++  void resolve_jobject(Register value, Register thread, Register tmp);
+++
+++  void movoop(Register dst, jobject obj, bool immediate = false);
+++  void mov_metadata(Register dst, Metadata* obj);
+++  void bang_stack_size(Register size, Register tmp);
+++  void set_narrow_oop(Register dst, jobject obj);
+++  void set_narrow_klass(Register dst, Klass* k);
+++
+++  void load_mirror(Register dst, Register method, Register tmp = x15);
+++  void access_load_at(BasicType type, DecoratorSet decorators, Register dst,
+++                      Address src, Register tmp1, Register thread_tmp);
+++  void access_store_at(BasicType type, DecoratorSet decorators, Address dst,
+++                       Register src, Register tmp1, Register thread_tmp);
+++  void load_klass(Register dst, Register src, Register tmp = t0);
+++  void store_klass(Register dst, Register src, Register tmp = t0);
+++  void cmp_klass(Register oop, Register trial_klass, Register tmp1, Register tmp2, Label &L);
+++
+++  void encode_klass_not_null(Register r, Register tmp = t0);
+++  void decode_klass_not_null(Register r, Register tmp = t0);
+++  void encode_klass_not_null(Register dst, Register src, Register tmp);
+++  void decode_klass_not_null(Register dst, Register src, Register tmp);
+++  void decode_heap_oop_not_null(Register r);
+++  void decode_heap_oop_not_null(Register dst, Register src);
+++  void decode_heap_oop(Register d, Register s);
+++  void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
+++  void encode_heap_oop(Register d, Register s);
+++  void encode_heap_oop(Register r) { encode_heap_oop(r, r); };
+++  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
+++                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
+++  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
+++                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
+++  void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
+++                      Register thread_tmp = noreg, DecoratorSet decorators = 0);
+++
+++  void store_klass_gap(Register dst, Register src);
+++
+++  // currently unimplemented
+++  // Used for storing NULL. All other oop constants should be
+++  // stored using routines that take a jobject.
+++  void store_heap_oop_null(Address dst);
+++
+++  // This dummy is to prevent a call to store_heap_oop from
+++  // converting a zero (like NULL) into a Register by giving
+++  // the compiler two choices it can't resolve
+++
+++  void store_heap_oop(Address dst, void* dummy);
+++
+++  // Support for NULL-checks
+++  //
+++  // Generates code that causes a NULL OS exception if the content of reg is NULL.
+++  // If the accessed location is M[reg + offset] and the offset is known, provide the
+++  // offset. No explicit code generation is needed if the offset is within a certain
+++  // range (0 <= offset <= page_size).
+++
+++  virtual void null_check(Register reg, int offset = -1);
+++  static bool needs_explicit_null_check(intptr_t offset);
+++  static bool uses_implicit_null_check(void* address);
+++
+++  // idiv variant which deals with MINLONG as dividend and -1 as divisor
+++  int corrected_idivl(Register result, Register rs1, Register rs2,
+++                      bool want_remainder);
+++  int corrected_idivq(Register result, Register rs1, Register rs2,
+++                      bool want_remainder);
+++
+++  // interface method calling
+++  void lookup_interface_method(Register recv_klass,
+++                               Register intf_klass,
+++                               RegisterOrConstant itable_index,
+++                               Register method_result,
+++                               Register scan_tmp,
+++                               Label& no_such_interface,
+++                               bool return_method = true);
+++
+++  // virtual method calling
+++  // n.b. x86 allows RegisterOrConstant for vtable_index
+++  void lookup_virtual_method(Register recv_klass,
+++                             RegisterOrConstant vtable_index,
+++                             Register method_result);
+++
+++  // Form an address from base + offset in Rd. Rd may or may not
+++  // actually be used: you must use the Address that is returned. It
+++  // is up to you to ensure that the shift provided matches the size
+++  // of your data.
+++  Address form_address(Register Rd, Register base, int64_t byte_offset);
+++
+++  // Sometimes we get misaligned loads and stores, usually from Unsafe
+++  // accesses, and these can exceed the offset range.
+++  Address legitimize_address(Register Rd, const Address &adr) {
+++    if (adr.getMode() == Address::base_plus_offset) {
+++      if (!is_simm12(adr.offset())) {
+++        return form_address(Rd, adr.base(), adr.offset());
+++      }
+++    }
+++    return adr;
+++  }
+++
+++  // allocation
+++  void tlab_allocate(
+++    Register obj,               // result: pointer to object after successful allocation
+++    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
+++    int con_size_in_bytes,      // object size in bytes if known at compile time
+++    Register tmp1,              // temp register
+++    Register tmp2,              // temp register
+++    Label& slow_case,           // continuation point if fast allocation fails
+++    bool is_far = false
+++  );
+++
+++  void eden_allocate(
+++    Register obj,               // result: pointer to object after successful allocation
+++    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
+++    int con_size_in_bytes,      // object size in bytes if known at compile time
+++    Register tmp,               // temp register
+++    Label& slow_case,           // continuation point if fast allocation fails
+++    bool is_far = false
+++  );
+++
+++  // Test sub_klass against super_klass, with fast and slow paths.
+++
+++  // The fast path produces a tri-state answer: yes / no / maybe-slow.
+++  // One of the three labels can be NULL, meaning take the fall-through.
+++  // If super_check_offset is -1, the value is loaded up from super_klass.
+++  // No registers are killed, except tmp_reg
+++  void check_klass_subtype_fast_path(Register sub_klass,
+++                                     Register super_klass,
+++                                     Register tmp_reg,
+++                                     Label* L_success,
+++                                     Label* L_failure,
+++                                     Label* L_slow_path,
+++                                     Register super_check_offset = noreg);
+++
+++  // The rest of the type check; must be wired to a corresponding fast path.
+++  // It does not repeat the fast path logic, so don't use it standalone.
+++  // The tmp1_reg and tmp2_reg can be noreg, if no temps are available.
+++  // Updates the sub's secondary super cache as necessary.
+++  void check_klass_subtype_slow_path(Register sub_klass,
+++                                     Register super_klass,
+++                                     Register tmp1_reg,
+++                                     Register tmp2_reg,
+++                                     Label* L_success,
+++                                     Label* L_failure);
+++
+++  void check_klass_subtype(Register sub_klass,
+++                           Register super_klass,
+++                           Register tmp_reg,
+++                           Label& L_success);
+++
+++  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
+++
+++  // only if +VerifyOops
+++  void verify_oop(Register reg, const char* s = "broken oop");
+++  void verify_oop_addr(Address addr, const char* s = "broken oop addr");
+++
+++  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
+++  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}
+++
+++#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
+++#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
+++
+++  // A more convenient access to fence for our purposes
+++  // We use four bits to indicate the read and write bits in the predecessors and successors,
+++  // and extend r to ir and w to ow when UseConservativeFence is enabled.
+++  enum Membar_mask_bits {
+++    StoreStore = 0b0101,               // (pred = ow   + succ =   ow)
+++    LoadStore  = 0b1001,               // (pred = ir   + succ =   ow)
+++    StoreLoad  = 0b0110,               // (pred = ow   + succ =   ir)
+++    LoadLoad   = 0b1010,               // (pred = ir   + succ =   ir)
+++    AnyAny     = LoadStore | StoreLoad // (pred = iorw + succ = iorw)
+++  };
+++
+++  void membar(uint32_t order_constraint);
+++
+++  static void membar_mask_to_pred_succ(uint32_t order_constraint,
+++                                       uint32_t& predecessor, uint32_t& successor) {
+++    predecessor = (order_constraint >> 2) & 0x3;
+++    successor = order_constraint & 0x3;
+++
+++    // extend rw -> iorw:
+++    // 01(w) -> 0101(ow)
+++    // 10(r) -> 1010(ir)
+++    // 11(rw)-> 1111(iorw)
+++    if (UseConservativeFence) {
+++      predecessor |= predecessor << 2;
+++      successor |= successor << 2;
+++    }
+++  }
+++
+++  static int pred_succ_to_membar_mask(uint32_t predecessor, uint32_t successor) {
+++    return ((predecessor & 0x3) << 2) | (successor & 0x3);
+++  }
+++
+++  // prints msg, dumps registers and stops execution
+++  void stop(const char* msg);
+++
+++  static void debug64(char* msg, int64_t pc, int64_t regs[]);
+++
+++  void unimplemented(const char* what = "");
+++
+++  void should_not_reach_here() { stop("should not reach here"); }
+++
+++  static address target_addr_for_insn(address insn_addr);
+++
+++  // Required platform-specific helpers for Label::patch_instructions.
+++  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
+++ static int pd_patch_instruction_size(address branch, address target); +++ static void pd_patch_instruction(address branch, address target, const char* file = NULL, int line = 0) { +++ pd_patch_instruction_size(branch, target); +++ } +++ static address pd_call_destination(address branch) { +++ return target_addr_for_insn(branch); +++ } +++ +++ static int patch_oop(address insn_addr, address o); +++ address emit_trampoline_stub(int insts_call_instruction_offset, address target); +++ void emit_static_call_stub(); +++ +++ // The following 4 methods return the offset of the appropriate move instruction +++ +++ // Support for fast byte/short loading with zero extension (depending on particular CPU) +++ int load_unsigned_byte(Register dst, Address src); +++ int load_unsigned_short(Register dst, Address src); +++ +++ // Support for fast byte/short loading with sign extension (depending on particular CPU) +++ int load_signed_byte(Register dst, Address src); +++ int load_signed_short(Register dst, Address src); +++ +++ // Load and store values by size and signed-ness +++ void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg); +++ void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg); +++ +++ public: +++ // Standard pseudo instructions +++ inline void nop() { +++ addi(x0, x0, 0); +++ } +++ +++ inline void mv(Register Rd, Register Rs) { +++ if (Rd != Rs) { +++ addi(Rd, Rs, 0); +++ } +++ } +++ +++ inline void notr(Register Rd, Register Rs) { +++ xori(Rd, Rs, -1); +++ } +++ +++ inline void neg(Register Rd, Register Rs) { +++ sub(Rd, x0, Rs); +++ } +++ +++ inline void negw(Register Rd, Register Rs) { +++ subw(Rd, x0, Rs); +++ } +++ +++ inline void sext_w(Register Rd, Register Rs) { +++ addiw(Rd, Rs, 0); +++ } +++ +++ inline void zext_b(Register Rd, Register Rs) { +++ andi(Rd, Rs, 0xFF); +++ } +++ +++ inline void seqz(Register Rd, Register Rs) { +++ sltiu(Rd, Rs, 1); +++ } +++ +++ inline void snez(Register Rd, Register Rs) { +++ sltu(Rd, x0, Rs); +++ } +++ +++ inline void sltz(Register Rd, Register Rs) { +++ slt(Rd, Rs, x0); +++ } +++ +++ inline void sgtz(Register Rd, Register Rs) { +++ slt(Rd, x0, Rs); +++ } +++ +++ // Bit-manipulation extension pseudo instructions +++ // zero extend word +++ inline void zext_w(Register Rd, Register Rs) { +++ add_uw(Rd, Rs, zr); +++ } +++ +++ // Floating-point data-processing pseudo instructions +++ inline void fmv_s(FloatRegister Rd, FloatRegister Rs) { +++ if (Rd != Rs) { +++ fsgnj_s(Rd, Rs, Rs); +++ } +++ } +++ +++ inline void fabs_s(FloatRegister Rd, FloatRegister Rs) { +++ fsgnjx_s(Rd, Rs, Rs); +++ } +++ +++ inline void fneg_s(FloatRegister Rd, FloatRegister Rs) { +++ fsgnjn_s(Rd, Rs, Rs); +++ } +++ +++ inline void fmv_d(FloatRegister Rd, FloatRegister Rs) { +++ if (Rd != Rs) { +++ fsgnj_d(Rd, Rs, Rs); +++ } +++ } +++ +++ inline void fabs_d(FloatRegister Rd, FloatRegister Rs) { +++ fsgnjx_d(Rd, Rs, Rs); +++ } +++ +++ inline void fneg_d(FloatRegister Rd, FloatRegister Rs) { +++ fsgnjn_d(Rd, Rs, Rs); +++ } +++ +++ // Control and status pseudo instructions +++ void rdinstret(Register Rd); // read instruction-retired counter +++ void rdcycle(Register Rd); // read cycle counter +++ void rdtime(Register Rd); // read time +++ void csrr(Register Rd, unsigned csr); // read csr +++ void csrw(unsigned csr, Register Rs); // write csr +++ void csrs(unsigned csr, Register Rs); // set bits in csr +++ void csrc(unsigned csr, Register Rs); // clear bits in csr +++ void csrwi(unsigned 
csr, unsigned imm);
+++  void csrsi(unsigned csr, unsigned imm);
+++  void csrci(unsigned csr, unsigned imm);
+++  void frcsr(Register Rd);              // read floating-point csr
+++  void fscsr(Register Rd, Register Rs); // swap floating-point csr
+++  void fscsr(Register Rs);              // write floating-point csr
+++  void frrm(Register Rd);               // read floating-point rounding mode
+++  void fsrm(Register Rd, Register Rs);  // swap floating-point rounding mode
+++  void fsrm(Register Rs);               // write floating-point rounding mode
+++  void fsrmi(Register Rd, unsigned imm);
+++  void fsrmi(unsigned imm);
+++  void frflags(Register Rd);              // read floating-point exception flags
+++  void fsflags(Register Rd, Register Rs); // swap floating-point exception flags
+++  void fsflags(Register Rs);              // write floating-point exception flags
+++  void fsflagsi(Register Rd, unsigned imm);
+++  void fsflagsi(unsigned imm);
+++
+++  // Control transfer pseudo instructions
+++  void beqz(Register Rs, const address dest);
+++  void bnez(Register Rs, const address dest);
+++  void blez(Register Rs, const address dest);
+++  void bgez(Register Rs, const address dest);
+++  void bltz(Register Rs, const address dest);
+++  void bgtz(Register Rs, const address dest);
+++
+++  void j(Label &l, Register temp = t0);
+++  void j(const address dest, Register temp = t0);
+++  void j(const Address &adr, Register temp = t0);
+++  void jal(Label &l, Register temp = t0);
+++  void jal(const address dest, Register temp = t0);
+++  void jal(const Address &adr, Register temp = t0);
+++  void jal(Register Rd, Label &L, Register temp = t0);
+++  void jal(Register Rd, const address dest, Register temp = t0);
+++
+++  // label
+++  void beqz(Register Rs, Label &l, bool is_far = false);
+++  void bnez(Register Rs, Label &l, bool is_far = false);
+++  void blez(Register Rs, Label &l, bool is_far = false);
+++  void bgez(Register Rs, Label &l, bool is_far = false);
+++  void bltz(Register Rs, Label &l, bool is_far = false);
+++  void bgtz(Register Rs, Label &l, bool is_far = false);
+++
+++  void beq (Register Rs1, Register Rs2, Label &L, bool is_far = false);
+++  void bne (Register Rs1, Register Rs2, Label &L, bool is_far = false);
+++  void blt (Register Rs1, Register Rs2, Label &L, bool is_far = false);
+++  void bge (Register Rs1, Register Rs2, Label &L, bool is_far = false);
+++  void bltu(Register Rs1, Register Rs2, Label &L, bool is_far = false);
+++  void bgeu(Register Rs1, Register Rs2, Label &L, bool is_far = false);
+++
+++  void bgt (Register Rs, Register Rt, const address dest);
+++  void ble (Register Rs, Register Rt, const address dest);
+++  void bgtu(Register Rs, Register Rt, const address dest);
+++  void bleu(Register Rs, Register Rt, const address dest);
+++
+++  void bgt (Register Rs, Register Rt, Label &l, bool is_far = false);
+++  void ble (Register Rs, Register Rt, Label &l, bool is_far = false);
+++  void bgtu(Register Rs, Register Rt, Label &l, bool is_far = false);
+++  void bleu(Register Rs, Register Rt, Label &l, bool is_far = false);
+++
+++#define INSN_ENTRY_RELOC(result_type, header) \
+++  result_type header { \
+++    guarantee(rtype == relocInfo::internal_word_type, \
+++              "only internal_word_type relocs make sense here"); \
+++    relocate(InternalAddress(dest).rspec()); \
+++    IncompressibleRegion ir(this);  /* relocations */
+++
+++#define INSN(NAME) \
+++  void NAME(Register Rs1, Register Rs2, const address dest) { \
+++    assert_cond(dest != NULL); \
+++    int64_t offset = dest - pc(); \
+++    guarantee(is_simm13(offset) && ((offset % 2) == 0), "offset is invalid."); \
+++    Assembler::NAME(Rs1, Rs2, offset); \
+++  }
\ +++ INSN_ENTRY_RELOC(void, NAME(Register Rs1, Register Rs2, address dest, relocInfo::relocType rtype)) \ +++ NAME(Rs1, Rs2, dest); \ +++ } +++ +++ INSN(beq); +++ INSN(bne); +++ INSN(bge); +++ INSN(bgeu); +++ INSN(blt); +++ INSN(bltu); +++ +++#undef INSN +++ +++#undef INSN_ENTRY_RELOC +++ +++ void float_beq(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false); +++ void float_bne(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false); +++ void float_ble(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false); +++ void float_bge(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false); +++ void float_blt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false); +++ void float_bgt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false); +++ +++ void double_beq(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false); +++ void double_bne(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false); +++ void double_ble(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false); +++ void double_bge(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false); +++ void double_blt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false); +++ void double_bgt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false); +++ +++private: +++ int push_reg(unsigned int bitset, Register stack); +++ int pop_reg(unsigned int bitset, Register stack); +++ int push_fp(unsigned int bitset, Register stack); +++ int pop_fp(unsigned int bitset, Register stack); +++#ifdef COMPILER2 +++ int push_v(unsigned int bitset, Register stack); +++ int pop_v(unsigned int bitset, Register stack); +++#endif // COMPILER2 +++ +++public: +++ void push_reg(Register Rs); +++ void pop_reg(Register Rd); +++ void push_reg(RegSet regs, Register stack) { if (regs.bits()) push_reg(regs.bits(), stack); } +++ void pop_reg(RegSet regs, Register stack) { if (regs.bits()) pop_reg(regs.bits(), stack); } +++ void push_fp(FloatRegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); } +++ void pop_fp(FloatRegSet regs, Register stack) { if (regs.bits()) pop_fp(regs.bits(), stack); } +++#ifdef COMPILER2 +++ void push_v(VectorRegSet regs, Register stack) { if (regs.bits()) push_v(regs.bits(), stack); } +++ void pop_v(VectorRegSet regs, Register stack) { if (regs.bits()) pop_v(regs.bits(), stack); } +++#endif // COMPILER2 +++ +++ // Push and pop everything that might be clobbered by a native +++ // runtime call except t0 and t1. (They are always +++ // temporary registers, so we don't have to protect them.) +++ // Additional registers can be excluded in a passed RegSet. 
+++ void push_call_clobbered_registers_except(RegSet exclude); +++ void pop_call_clobbered_registers_except(RegSet exclude); +++ +++ void push_call_clobbered_registers() { +++ push_call_clobbered_registers_except(RegSet()); +++ } +++ void pop_call_clobbered_registers() { +++ pop_call_clobbered_registers_except(RegSet()); +++ } +++ +++ void push_CPU_state(bool save_vectors = false, int vector_size_in_bytes = 0); +++ void pop_CPU_state(bool restore_vectors = false, int vector_size_in_bytes = 0); +++ +++ // if heap base register is used - reinit it with the correct value +++ void reinit_heapbase(); +++ +++ void bind(Label& L) { +++ Assembler::bind(L); +++ // fences across basic blocks should not be merged +++ code()->clear_last_insn(); +++ } +++ +++ typedef void (MacroAssembler::* compare_and_branch_insn)(Register Rs1, Register Rs2, const address dest); +++ typedef void (MacroAssembler::* compare_and_branch_label_insn)(Register Rs1, Register Rs2, Label &L, bool is_far); +++ typedef void (MacroAssembler::* jal_jalr_insn)(Register Rt, address dest); +++ typedef void (MacroAssembler::* load_insn_by_temp)(Register Rt, address dest, Register temp); +++ +++ void wrap_label(Register r, Label &L, Register t, load_insn_by_temp insn); +++ void wrap_label(Register r, Label &L, jal_jalr_insn insn); +++ void wrap_label(Register r1, Register r2, Label &L, +++ compare_and_branch_insn insn, +++ compare_and_branch_label_insn neg_insn, bool is_far = false); +++ +++ void la(Register Rd, Label &label); +++ void la(Register Rd, const address dest); +++ void la(Register Rd, const Address &adr); +++ +++ void li32(Register Rd, int32_t imm); +++ void li64(Register Rd, int64_t imm); +++ void li (Register Rd, int64_t imm); // optimized load immediate +++ +++ // mv +++ void mv(Register Rd, address addr) { li(Rd, (int64_t)addr); } +++ void mv(Register Rd, address addr, int32_t &offset) { +++ // Split address into a lower 12-bit sign-extended offset and the remainder, +++ // so that the offset could be encoded in jalr or load/store instruction. 
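+++    // For example, addr = 0x12345812 splits into offset = -0x7ee (0x812
+++    // sign-extended from 12 bits) plus a li of 0x12346000; adding the two
+++    // back together reproduces the original address.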
+++    offset = ((int32_t)(int64_t)addr << 20) >> 20;
+++    li(Rd, (int64_t)addr - offset);
+++  }
+++
+++  template<typename T, ENABLE_IF(std::is_integral<T>::value)>
+++  inline void mv(Register Rd, T o) { li(Rd, (int64_t)o); }
+++
+++  void mv(Register Rd, Address dest) {
+++    assert(dest.getMode() == Address::literal, "Address mode should be Address::literal");
+++    relocate(dest.rspec(), [&] {
+++      movptr(Rd, dest.target());
+++    });
+++  }
+++
+++  void mv(Register Rd, RegisterOrConstant src) {
+++    if (src.is_register()) {
+++      mv(Rd, src.as_register());
+++    } else {
+++      mv(Rd, src.as_constant());
+++    }
+++  }
+++
+++  void movptr(Register Rd, address addr, int32_t &offset);
+++
+++  void movptr(Register Rd, address addr) {
+++    int offset = 0;
+++    movptr(Rd, addr, offset);
+++    addi(Rd, Rd, offset);
+++  }
+++
+++  inline void movptr(Register Rd, uintptr_t imm64) {
+++    movptr(Rd, (address)imm64);
+++  }
+++
+++  // arith
+++  void add (Register Rd, Register Rn, int64_t increment, Register temp = t0);
+++  void addw(Register Rd, Register Rn, int32_t increment, Register temp = t0);
+++  void sub (Register Rd, Register Rn, int64_t decrement, Register temp = t0);
+++  void subw(Register Rd, Register Rn, int32_t decrement, Register temp = t0);
+++
+++#define INSN(NAME) \
+++  inline void NAME(Register Rd, Register Rs1, Register Rs2) { \
+++    Assembler::NAME(Rd, Rs1, Rs2); \
+++  }
+++
+++  INSN(add);
+++  INSN(addw);
+++  INSN(sub);
+++  INSN(subw);
+++
+++#undef INSN
+++
+++  // logic
+++  void andrw(Register Rd, Register Rs1, Register Rs2);
+++  void orrw(Register Rd, Register Rs1, Register Rs2);
+++  void xorrw(Register Rd, Register Rs1, Register Rs2);
+++
+++  // revb
+++  void revb_h_h(Register Rd, Register Rs, Register tmp = t0);                           // reverse bytes in halfword in lower 16 bits, sign-extend
+++  void revb_w_w(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);      // reverse bytes in lower word, sign-extend
+++  void revb_h_h_u(Register Rd, Register Rs, Register tmp = t0);                         // reverse bytes in halfword in lower 16 bits, zero-extend
+++  void revb_h_w_u(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);    // reverse bytes in halfwords in lower 32 bits, zero-extend
+++  void revb_h_helper(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1); // reverse bytes in upper 16 bits (48:63) and move to lower
+++  void revb_h(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);        // reverse bytes in each halfword
+++  void revb_w(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);        // reverse bytes in each word
+++  void revb(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);          // reverse bytes in doubleword
+++
+++  void ror_imm(Register dst, Register src, uint32_t shift, Register tmp = t0);
+++  void andi(Register Rd, Register Rn, int64_t imm, Register tmp = t0);
+++  void orptr(Address adr, RegisterOrConstant src, Register tmp1 = t0, Register tmp2 = t1);
+++
+++// Load and Store Instructions
+++#define INSN_ENTRY_RELOC(result_type, header) \
+++  result_type header { \
+++    guarantee(rtype == relocInfo::internal_word_type, \
+++              "only internal_word_type relocs make sense here"); \
+++    relocate(InternalAddress(dest).rspec()); \
+++    IncompressibleRegion ir(this);  /* relocations */
+++
+++#define INSN(NAME) \
+++  void NAME(Register Rd, address dest) { \
+++    assert_cond(dest != NULL); \
+++    int64_t distance = dest - pc(); \
+++    if (is_simm32(distance)) { \
+++      auipc(Rd, (int32_t)distance + 0x800); \
+++      Assembler::NAME(Rd, Rd, ((int32_t)distance << 20) >> 20); \
+++    } else { \
+++      int32_t offset = 0; \
+++      movptr(Rd,
dest, offset); \ +++ Assembler::NAME(Rd, Rd, offset); \ +++ } \ +++ } \ +++ INSN_ENTRY_RELOC(void, NAME(Register Rd, address dest, relocInfo::relocType rtype)) \ +++ NAME(Rd, dest); \ +++ } \ +++ void NAME(Register Rd, const Address &adr, Register temp = t0) { \ +++ switch (adr.getMode()) { \ +++ case Address::literal: { \ +++ relocate(adr.rspec()); \ +++ NAME(Rd, adr.target()); \ +++ break; \ +++ } \ +++ case Address::base_plus_offset: { \ +++ if (is_simm12(adr.offset())) { \ +++ Assembler::NAME(Rd, adr.base(), adr.offset()); \ +++ } else { \ +++ int32_t offset = ((int32_t)adr.offset() << 20) >> 20; \ +++ if (Rd == adr.base()) { \ +++ la(temp, Address(adr.base(), adr.offset() - offset)); \ +++ Assembler::NAME(Rd, temp, offset); \ +++ } else { \ +++ la(Rd, Address(adr.base(), adr.offset() - offset)); \ +++ Assembler::NAME(Rd, Rd, offset); \ +++ } \ +++ } \ +++ break; \ +++ } \ +++ default: \ +++ ShouldNotReachHere(); \ +++ } \ +++ } \ +++ void NAME(Register Rd, Label &L) { \ +++ wrap_label(Rd, L, &MacroAssembler::NAME); \ +++ } +++ +++ INSN(lb); +++ INSN(lbu); +++ INSN(lh); +++ INSN(lhu); +++ INSN(lw); +++ INSN(lwu); +++ INSN(ld); +++ +++#undef INSN +++ +++#define INSN(NAME) \ +++ void NAME(FloatRegister Rd, address dest, Register temp = t0) { \ +++ assert_cond(dest != NULL); \ +++ int64_t distance = dest - pc(); \ +++ if (is_simm32(distance)) { \ +++ auipc(temp, (int32_t)distance + 0x800); \ +++ Assembler::NAME(Rd, temp, ((int32_t)distance << 20) >> 20); \ +++ } else { \ +++ int32_t offset = 0; \ +++ movptr(temp, dest, offset); \ +++ Assembler::NAME(Rd, temp, offset); \ +++ } \ +++ } \ +++ INSN_ENTRY_RELOC(void, NAME(FloatRegister Rd, address dest, \ +++ relocInfo::relocType rtype, Register temp = t0)) \ +++ NAME(Rd, dest, temp); \ +++ } \ +++ void NAME(FloatRegister Rd, const Address &adr, Register temp = t0) { \ +++ switch (adr.getMode()) { \ +++ case Address::literal: { \ +++ relocate(adr.rspec()); \ +++ NAME(Rd, adr.target(), temp); \ +++ break; \ +++ } \ +++ case Address::base_plus_offset: { \ +++ if (is_simm12(adr.offset())) { \ +++ Assembler::NAME(Rd, adr.base(), adr.offset()); \ +++ } else { \ +++ int32_t offset = ((int32_t)adr.offset() << 20) >> 20; \ +++ la(temp, Address(adr.base(), adr.offset() - offset)); \ +++ Assembler::NAME(Rd, temp, offset); \ +++ } \ +++ break; \ +++ } \ +++ default: \ +++ ShouldNotReachHere(); \ +++ } \ +++ } +++ +++ INSN(flw); +++ INSN(fld); +++ +++#undef INSN +++ +++#define INSN(NAME, REGISTER) \ +++ INSN_ENTRY_RELOC(void, NAME(REGISTER Rs, address dest, \ +++ relocInfo::relocType rtype, Register temp = t0)) \ +++ NAME(Rs, dest, temp); \ +++ } +++ +++ INSN(sb, Register); +++ INSN(sh, Register); +++ INSN(sw, Register); +++ INSN(sd, Register); +++ INSN(fsw, FloatRegister); +++ INSN(fsd, FloatRegister); +++ +++#undef INSN +++ +++#define INSN(NAME) \ +++ void NAME(Register Rs, address dest, Register temp = t0) { \ +++ assert_cond(dest != NULL); \ +++ assert_different_registers(Rs, temp); \ +++ int64_t distance = dest - pc(); \ +++ if (is_simm32(distance)) { \ +++ auipc(temp, (int32_t)distance + 0x800); \ +++ Assembler::NAME(Rs, temp, ((int32_t)distance << 20) >> 20); \ +++ } else { \ +++ int32_t offset = 0; \ +++ movptr(temp, dest, offset); \ +++ Assembler::NAME(Rs, temp, offset); \ +++ } \ +++ } \ +++ void NAME(Register Rs, const Address &adr, Register temp = t0) { \ +++ switch (adr.getMode()) { \ +++ case Address::literal: { \ +++ assert_different_registers(Rs, temp); \ +++ relocate(adr.rspec()); \ +++ NAME(Rs, adr.target(), temp); \ +++ break; \ +++ } \ 
+++ case Address::base_plus_offset: { \ +++ if (is_simm12(adr.offset())) { \ +++ Assembler::NAME(Rs, adr.base(), adr.offset()); \ +++ } else { \ +++ assert_different_registers(Rs, temp); \ +++ int32_t offset = ((int32_t)adr.offset() << 20) >> 20; \ +++ la(temp, Address(adr.base(), adr.offset() - offset)); \ +++ Assembler::NAME(Rs, temp, offset); \ +++ } \ +++ break; \ +++ } \ +++ default: \ +++ ShouldNotReachHere(); \ +++ } \ +++ } +++ +++ INSN(sb); +++ INSN(sh); +++ INSN(sw); +++ INSN(sd); +++ +++#undef INSN +++ +++#define INSN(NAME) \ +++ void NAME(FloatRegister Rs, address dest, Register temp = t0) { \ +++ assert_cond(dest != NULL); \ +++ int64_t distance = dest - pc(); \ +++ if (is_simm32(distance)) { \ +++ auipc(temp, (int32_t)distance + 0x800); \ +++ Assembler::NAME(Rs, temp, ((int32_t)distance << 20) >> 20); \ +++ } else { \ +++ int32_t offset = 0; \ +++ movptr(temp, dest, offset); \ +++ Assembler::NAME(Rs, temp, offset); \ +++ } \ +++ } \ +++ void NAME(FloatRegister Rs, const Address &adr, Register temp = t0) { \ +++ switch (adr.getMode()) { \ +++ case Address::literal: { \ +++ relocate(adr.rspec()); \ +++ NAME(Rs, adr.target(), temp); \ +++ break; \ +++ } \ +++ case Address::base_plus_offset: { \ +++ if (is_simm12(adr.offset())) { \ +++ Assembler::NAME(Rs, adr.base(), adr.offset()); \ +++ } else { \ +++ int32_t offset = ((int32_t)adr.offset() << 20) >> 20; \ +++ la(temp, Address(adr.base(), adr.offset() - offset)); \ +++ Assembler::NAME(Rs, temp, offset); \ +++ } \ +++ break; \ +++ } \ +++ default: \ +++ ShouldNotReachHere(); \ +++ } \ +++ } +++ +++ INSN(fsw); +++ INSN(fsd); +++ +++#undef INSN +++ +++#undef INSN_ENTRY_RELOC +++ +++ void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, Label &succeed, Label *fail); +++ void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, Label &succeed, Label *fail); +++ void cmpxchg(Register addr, Register expected, +++ Register new_val, +++ enum operand_size size, +++ Assembler::Aqrl acquire, Assembler::Aqrl release, +++ Register result, bool result_as_bool = false); +++ void cmpxchg_weak(Register addr, Register expected, +++ Register new_val, +++ enum operand_size size, +++ Assembler::Aqrl acquire, Assembler::Aqrl release, +++ Register result); +++ void cmpxchg_narrow_value_helper(Register addr, Register expected, +++ Register new_val, +++ enum operand_size size, +++ Register tmp1, Register tmp2, Register tmp3); +++ void cmpxchg_narrow_value(Register addr, Register expected, +++ Register new_val, +++ enum operand_size size, +++ Assembler::Aqrl acquire, Assembler::Aqrl release, +++ Register result, bool result_as_bool, +++ Register tmp1, Register tmp2, Register tmp3); +++ void weak_cmpxchg_narrow_value(Register addr, Register expected, +++ Register new_val, +++ enum operand_size size, +++ Assembler::Aqrl acquire, Assembler::Aqrl release, +++ Register result, +++ Register tmp1, Register tmp2, Register tmp3); +++ +++ void atomic_add(Register prev, RegisterOrConstant incr, Register addr); +++ void atomic_addw(Register prev, RegisterOrConstant incr, Register addr); +++ void atomic_addal(Register prev, RegisterOrConstant incr, Register addr); +++ void atomic_addalw(Register prev, RegisterOrConstant incr, Register addr); +++ +++ void atomic_xchg(Register prev, Register newv, Register addr); +++ void atomic_xchgw(Register prev, Register newv, Register addr); +++ void atomic_xchgal(Register prev, Register newv, Register addr); +++ void atomic_xchgalw(Register prev, Register newv, Register addr); +++ void 
atomic_xchgwu(Register prev, Register newv, Register addr);
+++  void atomic_xchgalwu(Register prev, Register newv, Register addr);
+++
+++  void atomic_incw(Register counter_addr, Register tmp);
+++  void atomic_incw(Address counter_addr, Register tmp1, Register tmp2) {
+++    la(tmp1, counter_addr);
+++    atomic_incw(tmp1, tmp2);
+++  }
+++
+++  // Biased locking support
+++  // lock_reg and obj_reg must be loaded up with the appropriate values.
+++  // swap_reg is killed.
+++  // tmp_reg must be supplied and must not be t0 or t1
+++  // Optional slow case is for implementations (interpreter and C1) which branch to
+++  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
+++  // Returns offset of first potentially-faulting instruction for null
+++  // check info (currently consumed only by C1). If
+++  // swap_reg_contains_mark is true then returns -1 as it is assumed
+++  // the calling code has already passed any potential faults.
+++  void biased_locking_enter(Register lock_reg, Register obj_reg,
+++                            Register swap_reg, Register tmp_reg,
+++                            bool swap_reg_contains_mark,
+++                            Label& done, Label* slow_case = NULL,
+++                            BiasedLockingCounters* counters = NULL,
+++                            Register flag = noreg);
+++  void biased_locking_exit(Register obj_reg, Register tmp_reg, Label& done, Register flag = noreg);
+++
+++  static bool far_branches() {
+++    return ReservedCodeCacheSize > branch_range;
+++  }
+++
+++  // Jumps that can reach anywhere in the code cache.
+++  // Trashes tmp.
+++  void far_call(Address entry, CodeBuffer *cbuf = NULL, Register tmp = t0);
+++  void far_jump(Address entry, CodeBuffer *cbuf = NULL, Register tmp = t0);
+++
+++  static int far_branch_size() {
+++    if (far_branches()) {
+++      return 2 * 4;  // auipc + jalr, see far_call() & far_jump()
+++    } else {
+++      return 4;
+++    }
+++  }
+++
+++  void load_byte_map_base(Register reg);
+++
+++  void bang_stack_with_offset(int offset) {
+++    // stack grows down, caller passes positive offset
+++    assert(offset > 0, "must bang with negative offset");
+++    sub(t0, sp, offset);
+++    sd(zr, Address(t0));
+++  }
+++
+++  void la_patchable(Register reg1, const Address &dest, int32_t &offset);
+++
+++  virtual void _call_Unimplemented(address call_site) {
+++    mv(t1, call_site);
+++  }
+++
+++  #define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)
+++
+++  // Frame creation and destruction shared between JITs.
+++  void build_frame(int framesize);
+++  void remove_frame(int framesize);
+++
+++  void reserved_stack_check();
+++
+++  void get_polling_page(Register dest, relocInfo::relocType rtype);
+++  void read_polling_page(Register r, int32_t offset, relocInfo::relocType rtype);
+++
+++  address trampoline_call(Address entry, CodeBuffer* cbuf = NULL);
+++  address ic_call(address entry, jint method_index = 0);
+++
+++  // Support for memory inc/dec
+++  // n.b. increment/decrement calls with an Address destination will
+++  // need to use a scratch register to load the value to be
+++  // incremented. increment/decrement calls which add or subtract a
+++  // constant value other than sign-extended 12-bit immediate will need
+++  // to use a 2nd scratch register to hold the constant. so, an address
+++  // increment/decrement may trash both t0 and t1.
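+++  // For example, increment(Address(sp, 0), 8) emits a load of the word at
+++  // sp into a scratch register, an add of 8 and a store back, leaving sp
+++  // itself untouched.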
+++
+++  void increment(const Address dst, int64_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
+++  void incrementw(const Address dst, int32_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
+++
+++  void decrement(const Address dst, int64_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
+++  void decrementw(const Address dst, int32_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
+++
+++  void cmpptr(Register src1, Address src2, Label& equal);
+++
+++  void clinit_barrier(Register klass, Register tmp, Label* L_fast_path = NULL, Label* L_slow_path = NULL);
+++  void load_method_holder_cld(Register result, Register method);
+++  void load_method_holder(Register holder, Register method);
+++
+++  void compute_index(Register str1, Register trailing_zeros, Register match_mask,
+++                     Register result, Register char_tmp, Register tmp,
+++                     bool haystack_isL);
+++  void compute_match_mask(Register src, Register pattern, Register match_mask,
+++                          Register mask1, Register mask2);
+++
+++#ifdef COMPILER2
+++  void mul_add(Register out, Register in, Register offset,
+++               Register len, Register k, Register tmp);
+++  void cad(Register dst, Register src1, Register src2, Register carry);
+++  void cadc(Register dst, Register src1, Register src2, Register carry);
+++  void adc(Register dst, Register src1, Register src2, Register carry);
+++  void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
+++                       Register src1, Register src2, Register carry);
+++  void multiply_32_x_32_loop(Register x, Register xstart, Register x_xstart,
+++                             Register y, Register y_idx, Register z,
+++                             Register carry, Register product,
+++                             Register idx, Register kdx);
+++  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
+++                             Register y, Register y_idx, Register z,
+++                             Register carry, Register product,
+++                             Register idx, Register kdx);
+++  void multiply_128_x_128_loop(Register y, Register z,
+++                               Register carry, Register carry2,
+++                               Register idx, Register jdx,
+++                               Register yz_idx1, Register yz_idx2,
+++                               Register tmp, Register tmp3, Register tmp4,
+++                               Register tmp6, Register product_hi);
+++  void multiply_to_len(Register x, Register xlen, Register y, Register ylen,
+++                       Register z, Register zlen,
+++                       Register tmp1, Register tmp2, Register tmp3, Register tmp4,
+++                       Register tmp5, Register tmp6, Register product_hi);
+++#endif
+++
+++  void inflate_lo32(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);
+++  void inflate_hi32(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);
+++
+++  void ctzc_bit(Register Rd, Register Rs, bool isLL = false, Register tmp1 = t0, Register tmp2 = t1);
+++
+++  void zero_words(Register base, u_int64_t cnt);
+++  address zero_words(Register ptr, Register cnt);
+++  void fill_words(Register base, Register cnt, Register value);
+++  void zero_memory(Register addr, Register len, Register tmp);
+++
+++  // shift left by shamt and add
+++  void shadd(Register Rd, Register Rs1, Register Rs2, Register tmp, int shamt);
+++
+++  // test single bit in Rs, result is set to Rd
+++  void test_bit(Register Rd, Register Rs, uint32_t bit_pos, Register tmp = t0);
+++
+++  // The *_safe variants of the float conversion instructions below deal with
+++  // special inputs (NaN, +Inf, -Inf): a plain conversion would raise exception
+++  // flags and, for NaN, produce a wrong Java result, so extra code is emitted
+++  // to get correct results.
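+++  // (Java requires NaN to convert to 0 (JLS 5.1.3), while a raw fcvt
+++  // produces the largest representable value for NaN inputs; hence the
+++  // fclass-based NaN check in the implementation.)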
+++  void fcvt_w_s_safe(Register dst, FloatRegister src, Register tmp = t0);
+++  void fcvt_l_s_safe(Register dst, FloatRegister src, Register tmp = t0);
+++  void fcvt_w_d_safe(Register dst, FloatRegister src, Register tmp = t0);
+++  void fcvt_l_d_safe(Register dst, FloatRegister src, Register tmp = t0);
+++
+++  // vector load/store unit-stride instructions
+++  void vlex_v(VectorRegister vd, Register base, Assembler::SEW sew, VectorMask vm = unmasked) {
+++    switch (sew) {
+++      case Assembler::e64:
+++        vle64_v(vd, base, vm);
+++        break;
+++      case Assembler::e32:
+++        vle32_v(vd, base, vm);
+++        break;
+++      case Assembler::e16:
+++        vle16_v(vd, base, vm);
+++        break;
+++      case Assembler::e8: // fall through
+++      default:
+++        vle8_v(vd, base, vm);
+++        break;
+++    }
+++  }
+++
+++  void vsex_v(VectorRegister store_data, Register base, Assembler::SEW sew, VectorMask vm = unmasked) {
+++    switch (sew) {
+++      case Assembler::e64:
+++        vse64_v(store_data, base, vm);
+++        break;
+++      case Assembler::e32:
+++        vse32_v(store_data, base, vm);
+++        break;
+++      case Assembler::e16:
+++        vse16_v(store_data, base, vm);
+++        break;
+++      case Assembler::e8: // fall through
+++      default:
+++        vse8_v(store_data, base, vm);
+++        break;
+++    }
+++  }
+++
+++  // vector pseudo instructions
+++  inline void vmnot_m(VectorRegister vd, VectorRegister vs) {
+++    vmnand_mm(vd, vs, vs);
+++  }
+++
+++  inline void vncvt_x_x_w(VectorRegister vd, VectorRegister vs, VectorMask vm) {
+++    vnsrl_wx(vd, vs, x0, vm);
+++  }
+++
+++  inline void vfneg_v(VectorRegister vd, VectorRegister vs) {
+++    vfsgnjn_vv(vd, vs, vs);
+++  }
+++
+++  static const int zero_words_block_size;
+++
+++  void cast_primitive_type(BasicType type, Register Rt) {
+++    switch (type) {
+++      case T_BOOLEAN:
+++        sltu(Rt, zr, Rt);
+++        break;
+++      case T_CHAR   :
+++        zero_extend(Rt, Rt, 16);
+++        break;
+++      case T_BYTE   :
+++        sign_extend(Rt, Rt, 8);
+++        break;
+++      case T_SHORT  :
+++        sign_extend(Rt, Rt, 16);
+++        break;
+++      case T_INT    :
+++        sign_extend(Rt, Rt, 32);
+++        break;
+++      case T_LONG   : /* nothing to do */ break;
+++      case T_VOID   : /* nothing to do */ break;
+++      case T_FLOAT  : /* nothing to do */ break;
+++      case T_DOUBLE : /* nothing to do */ break;
+++      default: ShouldNotReachHere();
+++    }
+++  }
+++
+++  // float cmp with unordered_result
+++  void float_compare(Register result, FloatRegister Rs1, FloatRegister Rs2, int unordered_result);
+++  void double_compare(Register result, FloatRegister Rs1, FloatRegister Rs2, int unordered_result);
+++
+++  // Zero/Sign-extend
+++  void zero_extend(Register dst, Register src, int bits);
+++  void sign_extend(Register dst, Register src, int bits);
+++
+++  // compare src1 and src2 and get -1/0/1 in dst.
+++  // if [src1 > src2], dst = 1;
+++  // if [src1 == src2], dst = 0;
+++  // if [src1 < src2], dst = -1;
+++  void cmp_l2i(Register dst, Register src1, Register src2, Register tmp = t0);
+++
+++  void call(const address dest, Register temp = t0) {
+++    assert_cond(dest != NULL);
+++    assert(temp != noreg, "expecting a register");
+++    int32_t offset = 0;
+++    mv(temp, dest, offset);
+++    jalr(x1, temp, offset);
+++  }
+++
+++  inline void ret() {
+++    jalr(x0, x1, 0);
+++  }
+++
+++private:
+++
+++#ifdef ASSERT
+++  // Template short-hand support to clean-up after a failed call to trampoline
+++  // call generation (see trampoline_call() below), when a set of Labels must
+++  // be reset (before returning).
+++  template<typename Label, typename... More>
+++  void reset_labels(Label& lbl, More&... more) {
+++    lbl.reset(); reset_labels(more...);
+++  }
+++  template<typename Label>
+++  void reset_labels(Label& lbl) {
+++    lbl.reset();
+++  }
+++#endif
+++  void repne_scan(Register addr, Register value, Register count, Register tmp);
+++
+++  void ld_constant(Register dest, const Address &const_addr) {
+++    if (NearCpool) {
+++      ld(dest, const_addr);
+++    } else {
+++      InternalAddress target(const_addr.target());
+++      relocate(target.rspec(), [&] {
+++        int32_t offset;
+++        la_patchable(dest, target, offset);
+++        ld(dest, Address(dest, offset));
+++      });
+++    }
+++  }
+++
+++  int bitset_to_regs(unsigned int bitset, unsigned char* regs);
+++  Address add_memory_helper(const Address dst, Register tmp);
+++
+++  void load_reserved(Register addr, enum operand_size size, Assembler::Aqrl acquire);
+++  void store_conditional(Register addr, Register new_val, enum operand_size size, Assembler::Aqrl release);
+++
+++  void load_prototype_header(Register dst, Register src);
+++};
+++
+++#ifdef ASSERT
+++inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
+++#endif
+++
+++/**
+++ * class SkipIfEqual:
+++ *
+++ * Instantiating this class will result in assembly code being output that will
+++ * jump around any code emitted between the creation of the instance and its
+++ * automatic destruction at the end of a scope block, depending on the value of
+++ * the flag passed to the constructor, which will be checked at run-time.
+++ */
+++class SkipIfEqual {
+++ private:
+++  MacroAssembler* _masm;
+++  Label _label;
+++
+++ public:
+++  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
+++  ~SkipIfEqual();
+++};
+++
+++#endif // CPU_RISCV_MACROASSEMBLER_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.inline.hpp
++@@ -0,0 +1,31 @@
+++/*
+++ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef CPU_RISCV_MACROASSEMBLER_RISCV_INLINE_HPP
+++#define CPU_RISCV_MACROASSEMBLER_RISCV_INLINE_HPP
+++
+++// Still empty.
+++
+++#endif // CPU_RISCV_MACROASSEMBLER_RISCV_INLINE_HPP
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/matcher_riscv.hpp
++@@ -0,0 +1,169 @@
+++/*
+++ * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef CPU_RISCV_MATCHER_RISCV_HPP
+++#define CPU_RISCV_MATCHER_RISCV_HPP
+++
+++  // Defined within class Matcher
+++
+++  // false => size gets scaled to BytesPerLong, ok.
+++  static const bool init_array_count_is_in_bytes = false;
+++
+++  // Whether this platform implements the scalable vector feature
+++  static const bool implements_scalable_vector = true;
+++
+++  static const bool supports_scalable_vector() {
+++    return UseRVV;
+++  }
+++
+++  // riscv supports misaligned vector loads and stores.
+++  static constexpr bool misaligned_vectors_ok() {
+++    return true;
+++  }
+++
+++  // Whether code generation needs accurate ConvI2L types.
+++  static const bool convi2l_type_required = false;
+++
+++  // Does the CPU require late expand (see block.cpp for description of late expand)?
+++  static const bool require_postalloc_expand = false;
+++
+++  // Do we need to mask the count passed to shift instructions or does
+++  // the cpu only look at the lower 5/6 bits anyway?
+++  static const bool need_masked_shift_count = false;
+++
+++  // No support for generic vector operands.
+++  static const bool supports_generic_vector_operands = false;
+++
+++  static constexpr bool isSimpleConstant64(jlong value) {
+++    // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
+++    // Probably always true, even if a temp register is required.
+++    return true;
+++  }
+++
+++  // Use conditional move (CMOVL)
+++  static constexpr int long_cmove_cost() {
+++    // long cmoves are no more expensive than int cmoves
+++    return 0;
+++  }
+++
+++  static constexpr int float_cmove_cost() {
+++    // float cmoves are no more expensive than int cmoves
+++    return 0;
+++  }
+++
+++  // This affects two different things:
+++  //  - how Decode nodes are matched
+++  //  - how ImplicitNullCheck opportunities are recognized
+++  // If true, the matcher will try to remove all Decodes and match them
+++  // (as operands) into nodes. NullChecks are not prepared to deal with
+++  // Decodes by final_graph_reshaping().
+++  // If false, final_graph_reshaping() forces the decode behind the Cmp
+++  // for a NullCheck. The matcher matches the Decode node into a register.
+++  // Implicit_null_check optimization moves the Decode along with the
+++  // memory operation back up before the NullCheck.
+++  static bool narrow_oop_use_complex_address() {
+++    return CompressedOops::shift() == 0;
+++  }
+++
+++  static bool narrow_klass_use_complex_address() {
+++    return false;
+++  }
+++
+++  static bool const_oop_prefer_decode() {
+++    // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
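+++    // (With a NULL heap base the decode is at most a shift, so a narrow
+++    // oop constant plus DecodeN costs no more than materializing the
+++    // full pointer.)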
+++    return CompressedOops::base() == NULL;
+++  }
+++
+++  static bool const_klass_prefer_decode() {
+++    // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
+++    return CompressedKlassPointers::base() == NULL;
+++  }
+++
+++  // Is it better to copy float constants, or load them directly from
+++  // memory? Intel can load a float constant from a direct address,
+++  // requiring no extra registers. Most RISCs will have to materialize
+++  // an address into a register first, so they would do better to copy
+++  // the constant from stack.
+++  static const bool rematerialize_float_constants = false;
+++
+++  // If CPU can load and store mis-aligned doubles directly then no
+++  // fixup is needed. Else we split the double into 2 integer pieces
+++  // and move it piece-by-piece. Only happens when passing doubles into
+++  // C code as the Java calling convention forces doubles to be aligned.
+++  static const bool misaligned_doubles_ok = true;
+++
+++  // Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
+++  static const bool strict_fp_requires_explicit_rounding = false;
+++
+++  // Are floats converted to double when stored to stack during
+++  // deoptimization?
+++  static constexpr bool float_in_double() { return false; }
+++
+++  // Do ints take an entire long register or just half?
+++  // The relevant question is how the int is callee-saved:
+++  // the whole long is written but de-opt'ing will have to extract
+++  // the relevant 32 bits.
+++  static const bool int_in_long = true;
+++
+++  // Does the CPU support vector variable shift instructions?
+++  static constexpr bool supports_vector_variable_shifts(void) {
+++    return false;
+++  }
+++
+++  // Does the CPU support vector variable rotate instructions?
+++  static constexpr bool supports_vector_variable_rotates(void) {
+++    return false;
+++  }
+++
+++  // Does the CPU support vector constant rotate instructions?
+++  static constexpr bool supports_vector_constant_rotates(int shift) {
+++    return false;
+++  }
+++
+++  // Does the CPU support vector unsigned comparison instructions?
+++  static const bool supports_vector_comparison_unsigned(int vlen, BasicType bt) {
+++    return false;
+++  }
+++
+++  // Some microarchitectures have mask registers used on vectors
+++  static const bool has_predicated_vectors(void) {
+++    return false;
+++  }
+++
+++  // true means we have fast l2f conversion
+++  // false means that conversion is done by runtime call
+++  static constexpr bool convL2FSupported(void) {
+++    return true;
+++  }
+++
+++  // Implements a variant of EncodeISOArrayNode that encodes ASCII only
+++  static const bool supports_encode_ascii_array = false;
+++
+++  // Returns pre-selection estimated size of a vector operation.
+++  static int vector_op_pre_select_sz_estimate(int vopc, BasicType ety, int vlen) {
+++    return 0;
+++  }
+++
+++#endif // CPU_RISCV_MATCHER_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/methodHandles_riscv.cpp
++@@ -0,0 +1,455 @@
+++/*
+++ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#include "precompiled.hpp"
+++#include "asm/macroAssembler.hpp"
+++#include "classfile/javaClasses.inline.hpp"
+++#include "classfile/vmClasses.hpp"
+++#include "interpreter/interpreter.hpp"
+++#include "interpreter/interpreterRuntime.hpp"
+++#include "memory/allocation.inline.hpp"
+++#include "prims/jvmtiExport.hpp"
+++#include "prims/methodHandles.hpp"
+++#include "runtime/flags/flagSetting.hpp"
+++#include "runtime/frame.inline.hpp"
+++#include "runtime/stubRoutines.hpp"
+++
+++#define __ _masm->
+++
+++#ifdef PRODUCT
+++#define BLOCK_COMMENT(str) /* nothing */
+++#else
+++#define BLOCK_COMMENT(str) __ block_comment(str)
+++#endif
+++
+++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+++
+++void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
+++  if (VerifyMethodHandles) {
+++    verify_klass(_masm, klass_reg, VM_CLASS_ID(java_lang_Class),
+++                 "MH argument is a Class");
+++  }
+++  __ ld(klass_reg, Address(klass_reg, java_lang_Class::klass_offset()));
+++}
+++
+++#ifdef ASSERT
+++static int check_nonzero(const char* xname, int x) {
+++  assert(x != 0, "%s should be nonzero", xname);
+++  return x;
+++}
+++#define NONZERO(x) check_nonzero(#x, x)
+++#else //ASSERT
+++#define NONZERO(x) (x)
+++#endif //ASSERT
+++
+++#ifdef ASSERT
+++void MethodHandles::verify_klass(MacroAssembler* _masm,
+++                                 Register obj, vmClassID klass_id,
+++                                 const char* error_message) {
+++  InstanceKlass** klass_addr = vmClasses::klass_addr_at(klass_id);
+++  Klass* klass = vmClasses::klass_at(klass_id);
+++  Register temp1 = t1;
+++  Register temp2 = t0; // used by MacroAssembler::cmpptr
+++  Label L_ok, L_bad;
+++  BLOCK_COMMENT("verify_klass {");
+++  __ verify_oop(obj);
+++  __ beqz(obj, L_bad);
+++  __ push_reg(RegSet::of(temp1, temp2), sp);
+++  __ load_klass(temp1, obj, temp2);
+++  __ cmpptr(temp1, ExternalAddress((address) klass_addr), L_ok);
+++  intptr_t super_check_offset = klass->super_check_offset();
+++  __ ld(temp1, Address(temp1, super_check_offset));
+++  __ cmpptr(temp1, ExternalAddress((address) klass_addr), L_ok);
+++  __ pop_reg(RegSet::of(temp1, temp2), sp);
+++  __ bind(L_bad);
+++  __ stop(error_message);
+++  __ BIND(L_ok);
+++  __ pop_reg(RegSet::of(temp1, temp2), sp);
+++  BLOCK_COMMENT("} verify_klass");
+++}
+++
+++void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {}
+++
+++#endif //ASSERT
+++
+++void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
+++                                            bool for_compiler_entry) {
+++  assert(method == xmethod, "interpreter calling convention");
+++  Label L_no_such_method;
+++  __ beqz(xmethod, L_no_such_method);
+++  __ verify_method_ptr(method);
+++
+++  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
+++    Label run_compiled_code;
+++    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
+++    // compiled code in threads for which the event is enabled. Check here for
+++    // interp_only_mode if these events CAN be enabled.
+++
+++    __ lwu(t0, Address(xthread, JavaThread::interp_only_mode_offset()));
+++    __ beqz(t0, run_compiled_code);
+++    __ ld(t0, Address(method, Method::interpreter_entry_offset()));
+++    __ jr(t0);
+++    __ BIND(run_compiled_code);
+++  }
+++
+++  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
+++                                                     Method::from_interpreted_offset();
+++  __ ld(t0, Address(method, entry_offset));
+++  __ jr(t0);
+++  __ bind(L_no_such_method);
+++  __ far_jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
+++}
+++
+++void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
+++                                        Register recv, Register method_temp,
+++                                        Register temp2,
+++                                        bool for_compiler_entry) {
+++  BLOCK_COMMENT("jump_to_lambda_form {");
+++  // This is the initial entry point of a lazy method handle.
+++  // After type checking, it picks up the invoker from the LambdaForm.
+++  assert_different_registers(recv, method_temp, temp2);
+++  assert(recv != noreg, "required register");
+++  assert(method_temp == xmethod, "required register for loading method");
+++
+++  // Load the invoker, as MH -> MH.form -> LF.vmentry
+++  __ verify_oop(recv);
+++  __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset())), temp2);
+++  __ verify_oop(method_temp);
+++  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset())), temp2);
+++  __ verify_oop(method_temp);
+++  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset())), temp2);
+++  __ verify_oop(method_temp);
+++  __ access_load_at(T_ADDRESS, IN_HEAP, method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset())), noreg, noreg);
+++
+++  if (VerifyMethodHandles && !for_compiler_entry) {
+++    // make sure recv is already on stack
+++    __ ld(temp2, Address(method_temp, Method::const_offset()));
+++    __ load_sized_value(temp2,
+++                        Address(temp2, ConstMethod::size_of_parameters_offset()),
+++                        sizeof(u2), /*is_signed*/ false);
+++    Label L;
+++    __ ld(t0, __ argument_address(temp2, -1));
+++    __ beq(recv, t0, L);
+++    __ ld(x10, __ argument_address(temp2, -1));
+++    __ ebreak();
+++    __ BIND(L);
+++  }
+++
+++  jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
+++  BLOCK_COMMENT("} jump_to_lambda_form");
+++}
+++
+++// Code generation
+++address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
+++                                                                vmIntrinsics::ID iid) {
+++  const bool not_for_compiler_entry = false; // this is the interpreter entry
+++  assert(is_signature_polymorphic(iid), "expected invoke iid");
+++  if (iid == vmIntrinsics::_invokeGeneric ||
+++      iid == vmIntrinsics::_compiledLambdaForm) {
+++    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
+++    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
+++    // They all allow an appendix argument.
+++    __ ebreak(); // empty stubs make SG sick
+++    return NULL;
+++  }
+++
+++  // No need for an interpreter entry for linkToNative for now.
+++  // The interpreter calls the compiled entry through i2c.
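+++  // (The i2c adapter rewrites the interpreted argument layout to the
+++  // compiled calling convention before jumping to the compiled entry.)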
+++ if (iid == vmIntrinsics::_linkToNative) { +++ __ ebreak(); +++ return NULL; +++ } +++ +++ // x30: sender SP (must preserve; see prepare_to_jump_from_interpreted) +++ // xmethod: Method* +++ // x13: argument locator (parameter slot count, added to sp) +++ // x11: used as temp to hold mh or receiver +++ // x10, x29: garbage temps, blown away +++ Register argp = x13; // argument list ptr, live on error paths +++ Register mh = x11; // MH receiver; dies quickly and is recycled +++ +++ // here's where control starts out: +++ __ align(CodeEntryAlignment); +++ address entry_point = __ pc(); +++ +++ if (VerifyMethodHandles) { +++ assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2"); +++ +++ Label L; +++ BLOCK_COMMENT("verify_intrinsic_id {"); +++ __ lhu(t0, Address(xmethod, Method::intrinsic_id_offset_in_bytes())); +++ __ mv(t1, (int) iid); +++ __ beq(t0, t1, L); +++ if (iid == vmIntrinsics::_linkToVirtual || +++ iid == vmIntrinsics::_linkToSpecial) { +++ // could do this for all kinds, but would explode assembly code size +++ trace_method_handle(_masm, "bad Method*::intrinsic_id"); +++ } +++ __ ebreak(); +++ __ bind(L); +++ BLOCK_COMMENT("} verify_intrinsic_id"); +++ } +++ +++ // First task: Find out how big the argument list is. +++ Address x13_first_arg_addr; +++ int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid); +++ assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic"); +++ if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) { +++ __ ld(argp, Address(xmethod, Method::const_offset())); +++ __ load_sized_value(argp, +++ Address(argp, ConstMethod::size_of_parameters_offset()), +++ sizeof(u2), /*is_signed*/ false); +++ x13_first_arg_addr = __ argument_address(argp, -1); +++ } else { +++ DEBUG_ONLY(argp = noreg); +++ } +++ +++ if (!is_signature_polymorphic_static(iid)) { +++ __ ld(mh, x13_first_arg_addr); +++ DEBUG_ONLY(argp = noreg); +++ } +++ +++ // x13_first_arg_addr is live! +++ +++ trace_method_handle_interpreter_entry(_masm, iid); +++ if (iid == vmIntrinsics::_invokeBasic) { +++ generate_method_handle_dispatch(_masm, iid, mh, noreg, not_for_compiler_entry); +++ } else { +++ // Adjust argument list by popping the trailing MemberName argument. +++ Register recv = noreg; +++ if (MethodHandles::ref_kind_has_receiver(ref_kind)) { +++ // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack. +++ __ ld(recv = x12, x13_first_arg_addr); +++ } +++ DEBUG_ONLY(argp = noreg); +++ Register xmember = xmethod; // MemberName ptr; incoming method ptr is dead now +++ __ pop_reg(xmember); // extract last argument +++ generate_method_handle_dispatch(_masm, iid, recv, xmember, not_for_compiler_entry); +++ } +++ +++ return entry_point; +++} +++ +++ +++void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, +++ vmIntrinsics::ID iid, +++ Register receiver_reg, +++ Register member_reg, +++ bool for_compiler_entry) { +++ assert(is_signature_polymorphic(iid), "expected invoke iid"); +++ // temps used in this code are not used in *either* compiled or interpreted calling sequences +++ Register temp1 = x7; +++ Register temp2 = x28; +++ Register temp3 = x29; // x30 is live by this point: it contains the sender SP +++ if (for_compiler_entry) { +++ assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? 
noreg : j_rarg0), "only valid assignment"); +++ assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7); +++ assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7); +++ assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7); +++ } +++ +++ assert_different_registers(temp1, temp2, temp3, receiver_reg); +++ assert_different_registers(temp1, temp2, temp3, member_reg); +++ +++ if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) { +++ if (iid == vmIntrinsics::_linkToNative) { +++ assert(for_compiler_entry, "only compiler entry is supported"); +++ } +++ // indirect through MH.form.vmentry.vmtarget +++ jump_to_lambda_form(_masm, receiver_reg, xmethod, temp1, for_compiler_entry); +++ } else { +++ // The method is a member invoker used by direct method handles. +++ if (VerifyMethodHandles) { +++ // make sure the trailing argument really is a MemberName (caller responsibility) +++ verify_klass(_masm, member_reg, VM_CLASS_ID(java_lang_invoke_MemberName), +++ "MemberName required for invokeVirtual etc."); +++ } +++ +++ Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset())); +++ Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset())); +++ Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::method_offset())); +++ Address vmtarget_method( xmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset())); +++ +++ Register temp1_recv_klass = temp1; +++ if (iid != vmIntrinsics::_linkToStatic) { +++ __ verify_oop(receiver_reg); +++ if (iid == vmIntrinsics::_linkToSpecial) { +++ // Don't actually load the klass; just null-check the receiver. +++ __ null_check(receiver_reg); +++ } else { +++ // load receiver klass itself +++ __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes()); +++ __ load_klass(temp1_recv_klass, receiver_reg); +++ __ verify_klass_ptr(temp1_recv_klass); +++ } +++ BLOCK_COMMENT("check_receiver {"); +++ // The receiver for the MemberName must be in receiver_reg. +++ // Check the receiver against the MemberName.clazz +++ if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) { +++ // Did not load it above... +++ __ load_klass(temp1_recv_klass, receiver_reg); +++ __ verify_klass_ptr(temp1_recv_klass); +++ } +++ if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) { +++ Label L_ok; +++ Register temp2_defc = temp2; +++ __ load_heap_oop(temp2_defc, member_clazz, temp3); +++ load_klass_from_Class(_masm, temp2_defc); +++ __ verify_klass_ptr(temp2_defc); +++ __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok); +++ // If we get here, the type check failed! +++ __ ebreak(); +++ __ bind(L_ok); +++ } +++ BLOCK_COMMENT("} check_receiver"); +++ } +++ if (iid == vmIntrinsics::_linkToSpecial || +++ iid == vmIntrinsics::_linkToStatic) { +++ DEBUG_ONLY(temp1_recv_klass = noreg); // these guys didn't load the recv_klass +++ } +++ +++ // Live registers at this point: +++ // member_reg - MemberName that was the trailing argument +++ // temp1_recv_klass - klass of stacked receiver, if needed +++ // x30 - interpreter linkage (if interpreted) +++ // x11 ... 
x10 - compiler arguments (if compiled) +++ +++ Label L_incompatible_class_change_error; +++ switch (iid) { +++ case vmIntrinsics::_linkToSpecial: +++ if (VerifyMethodHandles) { +++ verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3); +++ } +++ __ load_heap_oop(xmethod, member_vmtarget); +++ __ access_load_at(T_ADDRESS, IN_HEAP, xmethod, vmtarget_method, noreg, noreg); +++ break; +++ +++ case vmIntrinsics::_linkToStatic: +++ if (VerifyMethodHandles) { +++ verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3); +++ } +++ __ load_heap_oop(xmethod, member_vmtarget); +++ __ access_load_at(T_ADDRESS, IN_HEAP, xmethod, vmtarget_method, noreg, noreg); +++ break; +++ +++ case vmIntrinsics::_linkToVirtual: +++ { +++ // same as TemplateTable::invokevirtual, +++ // minus the CP setup and profiling: +++ +++ if (VerifyMethodHandles) { +++ verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3); +++ } +++ +++ // pick out the vtable index from the MemberName, and then we can discard it: +++ Register temp2_index = temp2; +++ __ access_load_at(T_ADDRESS, IN_HEAP, temp2_index, member_vmindex, noreg, noreg); +++ +++ if (VerifyMethodHandles) { +++ Label L_index_ok; +++ __ bgez(temp2_index, L_index_ok); +++ __ ebreak(); +++ __ BIND(L_index_ok); +++ } +++ +++ // Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget +++ // at this point. And VerifyMethodHandles has already checked clazz, if needed. +++ +++ // get target Method* & entry point +++ __ lookup_virtual_method(temp1_recv_klass, temp2_index, xmethod); +++ break; +++ } +++ +++ case vmIntrinsics::_linkToInterface: +++ { +++ // same as TemplateTable::invokeinterface +++ // (minus the CP setup and profiling, with different argument motion) +++ if (VerifyMethodHandles) { +++ verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3); +++ } +++ +++ Register temp3_intf = temp3; +++ __ load_heap_oop(temp3_intf, member_clazz); +++ load_klass_from_Class(_masm, temp3_intf); +++ __ verify_klass_ptr(temp3_intf); +++ +++ Register rindex = xmethod; +++ __ access_load_at(T_ADDRESS, IN_HEAP, rindex, member_vmindex, noreg, noreg); +++ if (VerifyMethodHandles) { +++ Label L; +++ __ bgez(rindex, L); +++ __ ebreak(); +++ __ bind(L); +++ } +++ +++ // given intf, index, and recv klass, dispatch to the implementation method +++ __ lookup_interface_method(temp1_recv_klass, temp3_intf, +++ // note: next two args must be the same: +++ rindex, xmethod, +++ temp2, +++ L_incompatible_class_change_error); +++ break; +++ } +++ +++ default: +++ fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid)); +++ break; +++ } +++ +++ // live at this point: xmethod, x30 (if interpreted) +++ +++ // After figuring out which concrete method to call, jump into it. +++ // Note that this works in the interpreter with no data motion. +++ // But the compiled version will require that r2_recv be shifted out. 
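+++    // xmethod now holds the selected target Method*; tail-call into its entry.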
+++ __ verify_method_ptr(xmethod); +++ jump_from_method_handle(_masm, xmethod, temp1, for_compiler_entry); +++ if (iid == vmIntrinsics::_linkToInterface) { +++ __ bind(L_incompatible_class_change_error); +++ __ far_jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry())); +++ } +++ } +++ +++} +++ +++#ifndef PRODUCT +++void trace_method_handle_stub(const char* adaptername, +++ oopDesc* mh, +++ intptr_t* saved_regs, +++ intptr_t* entry_sp) { } +++ +++// The stub wraps the arguments in a struct on the stack to avoid +++// dealing with the different calling conventions for passing 6 +++// arguments. +++struct MethodHandleStubArguments { +++ const char* adaptername; +++ oopDesc* mh; +++ intptr_t* saved_regs; +++ intptr_t* entry_sp; +++}; +++void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) { } +++ +++void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) { } +++#endif //PRODUCT ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/methodHandles_riscv.hpp ++@@ -0,0 +1,57 @@ +++/* +++ * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++// Platform-specific definitions for method handles. +++// These definitions are inlined into class MethodHandles. +++ +++// Adapters +++enum /* platform_dependent_constants */ { +++ adapter_code_size = 32000 DEBUG_ONLY(+ 120000) +++}; +++ +++public: +++ +++ static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg); +++ +++ static void verify_klass(MacroAssembler* _masm, +++ Register obj, vmClassID klass_id, +++ const char* error_message = "wrong klass") NOT_DEBUG_RETURN; +++ +++ static void verify_method_handle(MacroAssembler* _masm, Register mh_reg) { +++ verify_klass(_masm, mh_reg, VM_CLASS_ID(java_lang_invoke_MethodHandle), +++ "reference is a MH"); +++ } +++ +++ static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN; +++ +++ // Similar to InterpreterMacroAssembler::jump_from_interpreted. +++ // Takes care of special dispatch from single stepping too. 
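+++  // (In interp_only_mode the jump is redirected to the method's
+++  // interpreter entry rather than its compiled entry.)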
+++  static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
+++                                      bool for_compiler_entry);
+++
+++  static void jump_to_lambda_form(MacroAssembler* _masm,
+++                                  Register recv, Register method_temp,
+++                                  Register temp2,
+++                                  bool for_compiler_entry);
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/nativeInst_riscv.cpp
++@@ -0,0 +1,441 @@
+++/*
+++ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#include "precompiled.hpp"
+++#include "asm/macroAssembler.hpp"
+++#include "code/compiledIC.hpp"
+++#include "memory/resourceArea.hpp"
+++#include "nativeInst_riscv.hpp"
+++#include "oops/oop.inline.hpp"
+++#include "runtime/handles.hpp"
+++#include "runtime/orderAccess.hpp"
+++#include "runtime/safepoint.hpp"
+++#include "runtime/sharedRuntime.hpp"
+++#include "runtime/stubRoutines.hpp"
+++#include "utilities/ostream.hpp"
+++#ifdef COMPILER1
+++#include "c1/c1_Runtime1.hpp"
+++#endif
+++
+++Register NativeInstruction::extract_rs1(address instr) {
+++  assert_cond(instr != NULL);
+++  return as_Register(Assembler::extract(((unsigned*)instr)[0], 19, 15));
+++}
+++
+++Register NativeInstruction::extract_rs2(address instr) {
+++  assert_cond(instr != NULL);
+++  return as_Register(Assembler::extract(((unsigned*)instr)[0], 24, 20));
+++}
+++
+++Register NativeInstruction::extract_rd(address instr) {
+++  assert_cond(instr != NULL);
+++  return as_Register(Assembler::extract(((unsigned*)instr)[0], 11, 7));
+++}
+++
+++uint32_t NativeInstruction::extract_opcode(address instr) {
+++  assert_cond(instr != NULL);
+++  return Assembler::extract(((unsigned*)instr)[0], 6, 0);
+++}
+++
+++uint32_t NativeInstruction::extract_funct3(address instr) {
+++  assert_cond(instr != NULL);
+++  return Assembler::extract(((unsigned*)instr)[0], 14, 12);
+++}
+++
+++bool NativeInstruction::is_pc_relative_at(address instr) {
+++  // auipc + jalr
+++  // auipc + addi
+++  // auipc + load
+++  // auipc + float_load
+++  return (is_auipc_at(instr)) &&
+++         (is_addi_at(instr + instruction_size) ||
+++          is_jalr_at(instr + instruction_size) ||
+++          is_load_at(instr + instruction_size) ||
+++          is_float_load_at(instr + instruction_size)) &&
+++         check_pc_relative_data_dependency(instr);
+++}
+++
+++// i.e. ld(Rd, Label)
+++bool NativeInstruction::is_load_pc_relative_at(address instr) {
+++  return is_auipc_at(instr) && // auipc
+++ is_ld_at(instr + instruction_size) && // ld +++ check_load_pc_relative_data_dependency(instr); +++} +++ +++bool NativeInstruction::is_movptr_at(address instr) { +++ return is_lui_at(instr) && // Lui +++ is_addi_at(instr + instruction_size) && // Addi +++ is_slli_shift_at(instr + instruction_size * 2, 11) && // Slli Rd, Rs, 11 +++ is_addi_at(instr + instruction_size * 3) && // Addi +++ is_slli_shift_at(instr + instruction_size * 4, 6) && // Slli Rd, Rs, 6 +++ (is_addi_at(instr + instruction_size * 5) || +++ is_jalr_at(instr + instruction_size * 5) || +++ is_load_at(instr + instruction_size * 5)) && // Addi/Jalr/Load +++ check_movptr_data_dependency(instr); +++} +++ +++bool NativeInstruction::is_li32_at(address instr) { +++ return is_lui_at(instr) && // lui +++ is_addiw_at(instr + instruction_size) && // addiw +++ check_li32_data_dependency(instr); +++} +++ +++bool NativeInstruction::is_li64_at(address instr) { +++ return is_lui_at(instr) && // lui +++ is_addi_at(instr + instruction_size) && // addi +++ is_slli_shift_at(instr + instruction_size * 2, 12) && // Slli Rd, Rs, 12 +++ is_addi_at(instr + instruction_size * 3) && // addi +++ is_slli_shift_at(instr + instruction_size * 4, 12) && // Slli Rd, Rs, 12 +++ is_addi_at(instr + instruction_size * 5) && // addi +++ is_slli_shift_at(instr + instruction_size * 6, 8) && // Slli Rd, Rs, 8 +++ is_addi_at(instr + instruction_size * 7) && // addi +++ check_li64_data_dependency(instr); +++} +++ +++void NativeCall::verify() { +++ assert(NativeCall::is_call_at((address)this), "unexpected code at call site"); +++} +++ +++address NativeCall::destination() const { +++ address addr = (address)this; +++ assert(NativeInstruction::is_jal_at(instruction_address()), "inst must be jal."); +++ address destination = MacroAssembler::target_addr_for_insn(instruction_address()); +++ +++ // Do we use a trampoline stub for this call? +++ CodeBlob* cb = CodeCache::find_blob_unsafe(addr); // Else we get assertion if nmethod is zombie. +++ assert(cb && cb->is_nmethod(), "sanity"); +++ nmethod *nm = (nmethod *)cb; +++ if (nm != NULL && nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) { +++ // Yes we do, so get the destination from the trampoline stub. +++ const address trampoline_stub_addr = destination; +++ destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination(); +++ } +++ +++ return destination; +++} +++ +++// Similar to replace_mt_safe, but just changes the destination. The +++// important thing is that free-running threads are able to execute this +++// call instruction at all times. +++// +++// Used in the runtime linkage of calls; see class CompiledIC. +++// +++// Add parameter assert_lock to switch off assertion +++// during code generation, where no patching lock is needed. +++void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) { +++ assert(!assert_lock || +++ (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) || +++ CompiledICLocker::is_safe(addr_at(0)), +++ "concurrent code patching"); +++ +++ ResourceMark rm; +++ address addr_call = addr_at(0); +++ assert(NativeCall::is_call_at(addr_call), "unexpected code at call site"); +++ +++ // Patch the constant in the call's trampoline stub. +++ address trampoline_stub_addr = get_trampoline(); +++ if (trampoline_stub_addr != NULL) { +++ assert (!is_NativeCallTrampolineStub_at(dest), "chained trampolines"); +++ nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest); +++ } +++ +++ // Patch the call. 
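+++  // If the target is within the +/- 1 MiB reach of a jal, branch to it
+++  // directly; otherwise retarget the jal at the trampoline stub patched
+++  // above, which performs the far jump.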
+++ if (Assembler::reachable_from_branch_at(addr_call, dest)) { +++ set_destination(dest); +++ } else { +++ assert (trampoline_stub_addr != NULL, "we need a trampoline"); +++ set_destination(trampoline_stub_addr); +++ } +++ +++ ICache::invalidate_range(addr_call, instruction_size); +++} +++ +++address NativeCall::get_trampoline() { +++ address call_addr = addr_at(0); +++ +++ CodeBlob *code = CodeCache::find_blob(call_addr); +++ assert(code != NULL, "Could not find the containing code blob"); +++ +++ address jal_destination = MacroAssembler::pd_call_destination(call_addr); +++ if (code != NULL && code->contains(jal_destination) && is_NativeCallTrampolineStub_at(jal_destination)) { +++ return jal_destination; +++ } +++ +++ if (code != NULL && code->is_nmethod()) { +++ return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code); +++ } +++ +++ return NULL; +++} +++ +++// Inserts a native call instruction at a given pc +++void NativeCall::insert(address code_pos, address entry) { Unimplemented(); } +++ +++//------------------------------------------------------------------- +++ +++void NativeMovConstReg::verify() { +++ if (!(nativeInstruction_at(instruction_address())->is_movptr() || +++ is_auipc_at(instruction_address()))) { +++ fatal("should be MOVPTR or AUIPC"); +++ } +++} +++ +++intptr_t NativeMovConstReg::data() const { +++ address addr = MacroAssembler::target_addr_for_insn(instruction_address()); +++ if (maybe_cpool_ref(instruction_address())) { +++ return *(intptr_t*)addr; +++ } else { +++ return (intptr_t)addr; +++ } +++} +++ +++void NativeMovConstReg::set_data(intptr_t x) { +++ if (maybe_cpool_ref(instruction_address())) { +++ address addr = MacroAssembler::target_addr_for_insn(instruction_address()); +++ *(intptr_t*)addr = x; +++ } else { +++ // Store x into the instruction stream. +++ MacroAssembler::pd_patch_instruction_size(instruction_address(), (address)x); +++ ICache::invalidate_range(instruction_address(), movptr_instruction_size); +++ } +++ +++ // Find and replace the oop/metadata corresponding to this +++ // instruction in oops section. +++ CodeBlob* cb = CodeCache::find_blob(instruction_address()); +++ nmethod* nm = cb->as_nmethod_or_null(); +++ if (nm != NULL) { +++ RelocIterator iter(nm, instruction_address(), next_instruction_address()); +++ while (iter.next()) { +++ if (iter.type() == relocInfo::oop_type) { +++ oop* oop_addr = iter.oop_reloc()->oop_addr(); +++ *oop_addr = cast_to_oop(x); +++ break; +++ } else if (iter.type() == relocInfo::metadata_type) { +++ Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr(); +++ *metadata_addr = (Metadata*)x; +++ break; +++ } +++ } +++ } +++} +++ +++void NativeMovConstReg::print() { +++ tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT, +++ p2i(instruction_address()), data()); +++} +++ +++//------------------------------------------------------------------- +++ +++int NativeMovRegMem::offset() const { +++ Unimplemented(); +++ return 0; +++} +++ +++void NativeMovRegMem::set_offset(int x) { Unimplemented(); } +++ +++void NativeMovRegMem::verify() { +++ Unimplemented(); +++} +++ +++//-------------------------------------------------------------------------------- +++ +++void NativeJump::verify() { } +++ +++ +++void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) { +++ // Patching to not_entrant can happen while activations of the method are +++ // in use. The patching in that instance must happen only when certain +++ // alignment restrictions are true. 
These guarantees check those
+++  // conditions.
+++
+++  // Must be 4 bytes aligned
+++  MacroAssembler::assert_alignment(verified_entry);
+++}
+++
+++
+++address NativeJump::jump_destination() const {
+++  address dest = MacroAssembler::target_addr_for_insn(instruction_address());
+++
+++  // We use jump to self as the unresolved address which the inline
+++  // cache code (and relocs) know about
+++  // As a special case we also use sequence movptr(r,0), jalr(r,0)
+++  // i.e. jump to 0 when we need to leave space for a wide immediate
+++  // load
+++
+++  // return -1 if jump to self or to 0
+++  if ((dest == (address) this) || dest == 0) {
+++    dest = (address) -1;
+++  }
+++
+++  return dest;
+++}
+++
+++void NativeJump::set_jump_destination(address dest) {
+++  // We use jump to self as the unresolved address which the inline
+++  // cache code (and relocs) know about
+++  if (dest == (address) -1)
+++    dest = instruction_address();
+++
+++  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
+++  ICache::invalidate_range(instruction_address(), instruction_size);
+++}
+++
+++//-------------------------------------------------------------------
+++
+++address NativeGeneralJump::jump_destination() const {
+++  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
+++  address dest = (address) move->data();
+++
+++  // We use jump to self as the unresolved address which the inline
+++  // cache code (and relocs) know about
+++  // As a special case we also use jump to 0 when first generating
+++  // a general jump
+++
+++  // return -1 if jump to self or to 0
+++  if ((dest == (address) this) || dest == 0) {
+++    dest = (address) -1;
+++  }
+++
+++  return dest;
+++}
+++
+++//-------------------------------------------------------------------
+++
+++bool NativeInstruction::is_safepoint_poll() {
+++  return is_lwu_to_zr(address(this));
+++}
+++
+++bool NativeInstruction::is_lwu_to_zr(address instr) {
+++  assert_cond(instr != NULL);
+++  return (extract_opcode(instr) == 0b0000011 &&
+++          extract_funct3(instr) == 0b110 &&
+++          extract_rd(instr) == zr); // zr
+++}
+++
+++// A 16-bit instruction with all bits ones is permanently reserved as an illegal instruction.
+++bool NativeInstruction::is_sigill_zombie_not_entrant() {
+++  // jvmci
+++  return uint_at(0) == 0xffffffff;
+++}
+++
+++void NativeIllegalInstruction::insert(address code_pos) {
+++  assert_cond(code_pos != NULL);
+++  *(juint*)code_pos = 0xffffffff; // all bits ones is permanently reserved as an illegal instruction
+++}
+++
+++bool NativeInstruction::is_stop() {
+++  return uint_at(0) == 0xc0101073; // an illegal instruction, 'csrrw x0, time, x0'
+++}
+++
+++//-------------------------------------------------------------------
+++
+++// MT-safe inserting of a jump over a jump or a nop (used by
+++// nmethod::make_not_entrant_or_zombie)
+++
+++void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
+++
+++  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
+++
+++  assert(nativeInstruction_at(verified_entry)->is_jump_or_nop() ||
+++         nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
+++         "riscv cannot replace non-jump with jump");
+++
+++  check_verified_entry_alignment(entry, verified_entry);
+++
+++  // Patch this nmethod atomically.
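+++  // The jal's 21-bit signed offset fields are scattered over instruction
+++  // bits 31..12; a single aligned 32-bit store of the encoded instruction
+++  // below keeps the patch atomic for concurrently executing threads.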
+++ if (Assembler::reachable_from_branch_at(verified_entry, dest)) { +++ ptrdiff_t offset = dest - verified_entry; +++ guarantee(Assembler::is_simm21(offset) && ((offset % 2) == 0), +++ "offset is too large to be patched in one jal instruction."); // 1M +++ +++ uint32_t insn = 0; +++ address pInsn = (address)&insn; +++ Assembler::patch(pInsn, 31, 31, (offset >> 20) & 0x1); +++ Assembler::patch(pInsn, 30, 21, (offset >> 1) & 0x3ff); +++ Assembler::patch(pInsn, 20, 20, (offset >> 11) & 0x1); +++ Assembler::patch(pInsn, 19, 12, (offset >> 12) & 0xff); +++ Assembler::patch(pInsn, 11, 7, 0); // zero, no link jump +++ Assembler::patch(pInsn, 6, 0, 0b1101111); // j, (jal x0 offset) +++ *(unsigned int*)verified_entry = insn; +++ } else { +++ // We use an illegal instruction for marking a method as +++ // not_entrant or zombie. +++ NativeIllegalInstruction::insert(verified_entry); +++ } +++ +++ ICache::invalidate_range(verified_entry, instruction_size); +++} +++ +++void NativeGeneralJump::insert_unconditional(address code_pos, address entry) { +++ CodeBuffer cb(code_pos, instruction_size); +++ MacroAssembler a(&cb); +++ Assembler::IncompressibleRegion ir(&a); // Fixed length: see NativeGeneralJump::get_instruction_size() +++ +++ int32_t offset = 0; +++ a.movptr(t0, entry, offset); // lui, addi, slli, addi, slli +++ a.jalr(x0, t0, offset); // jalr +++ +++ ICache::invalidate_range(code_pos, instruction_size); +++} +++ +++// MT-safe patching of a long jump instruction. +++void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) { +++ ShouldNotCallThis(); +++} +++ +++ +++address NativeCallTrampolineStub::destination(nmethod *nm) const { +++ return ptr_at(data_offset); +++} +++ +++void NativeCallTrampolineStub::set_destination(address new_destination) { +++ set_ptr_at(data_offset, new_destination); +++ OrderAccess::release(); +++} +++ +++uint32_t NativeMembar::get_kind() { +++ uint32_t insn = uint_at(0); +++ +++ uint32_t predecessor = Assembler::extract(insn, 27, 24); +++ uint32_t successor = Assembler::extract(insn, 23, 20); +++ +++ return MacroAssembler::pred_succ_to_membar_mask(predecessor, successor); +++} +++ +++void NativeMembar::set_kind(uint32_t order_kind) { +++ uint32_t predecessor = 0; +++ uint32_t successor = 0; +++ +++ MacroAssembler::membar_mask_to_pred_succ(order_kind, predecessor, successor); +++ +++ uint32_t insn = uint_at(0); +++ address pInsn = (address) &insn; +++ Assembler::patch(pInsn, 27, 24, predecessor); +++ Assembler::patch(pInsn, 23, 20, successor); +++ +++ address membar = addr_at(0); +++ *(unsigned int*) membar = insn; +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/nativeInst_riscv.hpp ++@@ -0,0 +1,554 @@ +++/* +++ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). 
+++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_NATIVEINST_RISCV_HPP +++#define CPU_RISCV_NATIVEINST_RISCV_HPP +++ +++#include "asm/assembler.hpp" +++#include "runtime/icache.hpp" +++#include "runtime/os.hpp" +++ +++// We have interfaces for the following instructions: +++// - NativeInstruction +++// - - NativeCall +++// - - NativeMovConstReg +++// - - NativeMovRegMem +++// - - NativeJump +++// - - NativeGeneralJump +++// - - NativeIllegalInstruction +++// - - NativeCallTrampolineStub +++// - - NativeMembar +++ +++// The base class for different kinds of native instruction abstractions. +++// Provides the primitive operations to manipulate code relative to this. +++ +++class NativeCall; +++ +++class NativeInstruction { +++ friend class Relocation; +++ friend bool is_NativeCallTrampolineStub_at(address); +++ public: +++ enum { +++ instruction_size = 4, +++ compressed_instruction_size = 2, +++ }; +++ +++ juint encoding() const { +++ return uint_at(0); +++ } +++ +++ bool is_jal() const { return is_jal_at(addr_at(0)); } +++ bool is_movptr() const { return is_movptr_at(addr_at(0)); } +++ bool is_call() const { return is_call_at(addr_at(0)); } +++ bool is_jump() const { return is_jump_at(addr_at(0)); } +++ +++ static bool is_jal_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1101111; } +++ static bool is_jalr_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1100111 && extract_funct3(instr) == 0b000; } +++ static bool is_branch_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1100011; } +++ static bool is_ld_at(address instr) { assert_cond(instr != NULL); return is_load_at(instr) && extract_funct3(instr) == 0b011; } +++ static bool is_load_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0000011; } +++ static bool is_float_load_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0000111; } +++ static bool is_auipc_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0010111; } +++ static bool is_jump_at(address instr) { assert_cond(instr != NULL); return is_branch_at(instr) || is_jal_at(instr) || is_jalr_at(instr); } +++ static bool is_addi_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0010011 && extract_funct3(instr) == 0b000; } +++ static bool is_addiw_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0011011 && extract_funct3(instr) == 0b000; } +++ static bool is_lui_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0110111; } +++ static bool is_slli_shift_at(address instr, uint32_t shift) { +++ assert_cond(instr != NULL); +++ return (extract_opcode(instr) == 0b0010011 && // opcode field +++ extract_funct3(instr) == 0b001 && // funct3 field, select the type of operation +++ Assembler::extract(((unsigned*)instr)[0], 25, 20) == shift); // shamt field +++ } +++ +++ static Register extract_rs1(address instr); +++ static Register extract_rs2(address instr); +++ static Register extract_rd(address instr); +++ static 
uint32_t extract_opcode(address instr); +++ static uint32_t extract_funct3(address instr); +++ +++ // the instruction sequence of movptr is as below: +++ // lui +++ // addi +++ // slli +++ // addi +++ // slli +++ // addi/jalr/load +++ static bool check_movptr_data_dependency(address instr) { +++ address lui = instr; +++ address addi1 = lui + instruction_size; +++ address slli1 = addi1 + instruction_size; +++ address addi2 = slli1 + instruction_size; +++ address slli2 = addi2 + instruction_size; +++ address last_instr = slli2 + instruction_size; +++ return extract_rs1(addi1) == extract_rd(lui) && +++ extract_rs1(addi1) == extract_rd(addi1) && +++ extract_rs1(slli1) == extract_rd(addi1) && +++ extract_rs1(slli1) == extract_rd(slli1) && +++ extract_rs1(addi2) == extract_rd(slli1) && +++ extract_rs1(addi2) == extract_rd(addi2) && +++ extract_rs1(slli2) == extract_rd(addi2) && +++ extract_rs1(slli2) == extract_rd(slli2) && +++ extract_rs1(last_instr) == extract_rd(slli2); +++ } +++ +++ // the instruction sequence of li64 is as below: +++ // lui +++ // addi +++ // slli +++ // addi +++ // slli +++ // addi +++ // slli +++ // addi +++ static bool check_li64_data_dependency(address instr) { +++ address lui = instr; +++ address addi1 = lui + instruction_size; +++ address slli1 = addi1 + instruction_size; +++ address addi2 = slli1 + instruction_size; +++ address slli2 = addi2 + instruction_size; +++ address addi3 = slli2 + instruction_size; +++ address slli3 = addi3 + instruction_size; +++ address addi4 = slli3 + instruction_size; +++ return extract_rs1(addi1) == extract_rd(lui) && +++ extract_rs1(addi1) == extract_rd(addi1) && +++ extract_rs1(slli1) == extract_rd(addi1) && +++ extract_rs1(slli1) == extract_rd(slli1) && +++ extract_rs1(addi2) == extract_rd(slli1) && +++ extract_rs1(addi2) == extract_rd(addi2) && +++ extract_rs1(slli2) == extract_rd(addi2) && +++ extract_rs1(slli2) == extract_rd(slli2) && +++ extract_rs1(addi3) == extract_rd(slli2) && +++ extract_rs1(addi3) == extract_rd(addi3) && +++ extract_rs1(slli3) == extract_rd(addi3) && +++ extract_rs1(slli3) == extract_rd(slli3) && +++ extract_rs1(addi4) == extract_rd(slli3) && +++ extract_rs1(addi4) == extract_rd(addi4); +++ } +++ +++ // the instruction sequence of li32 is as below: +++ // lui +++ // addiw +++ static bool check_li32_data_dependency(address instr) { +++ address lui = instr; +++ address addiw = lui + instruction_size; +++ +++ return extract_rs1(addiw) == extract_rd(lui) && +++ extract_rs1(addiw) == extract_rd(addiw); +++ } +++ +++ // the instruction sequence of pc-relative is as below: +++ // auipc +++ // jalr/addi/load/float_load +++ static bool check_pc_relative_data_dependency(address instr) { +++ address auipc = instr; +++ address last_instr = auipc + instruction_size; +++ +++ return extract_rs1(last_instr) == extract_rd(auipc); +++ } +++ +++ // the instruction sequence of load_label is as below: +++ // auipc +++ // load +++ static bool check_load_pc_relative_data_dependency(address instr) { +++ address auipc = instr; +++ address load = auipc + instruction_size; +++ +++ return extract_rd(load) == extract_rd(auipc) && +++ extract_rs1(load) == extract_rd(load); +++ } +++ +++ static bool is_movptr_at(address instr); +++ static bool is_li32_at(address instr); +++ static bool is_li64_at(address instr); +++ static bool is_pc_relative_at(address branch); +++ static bool is_load_pc_relative_at(address branch); +++ +++ static bool is_call_at(address instr) { +++ if (is_jal_at(instr) || is_jalr_at(instr)) { +++ return true; +++ } +++ 
return false;
+++  }
+++  static bool is_lwu_to_zr(address instr);
+++
+++  inline bool is_nop();
+++  inline bool is_jump_or_nop();
+++  bool is_safepoint_poll();
+++  bool is_sigill_zombie_not_entrant();
+++  bool is_stop();
+++
+++ protected:
+++  address addr_at(int offset) const { return address(this) + offset; }
+++
+++  jint int_at(int offset) const { return *(jint*) addr_at(offset); }
+++  juint uint_at(int offset) const { return *(juint*) addr_at(offset); }
+++
+++  address ptr_at(int offset) const { return *(address*) addr_at(offset); }
+++
+++  oop oop_at (int offset) const { return *(oop*) addr_at(offset); }
+++
+++
+++  void set_int_at(int offset, jint i) { *(jint*)addr_at(offset) = i; }
+++  void set_uint_at(int offset, jint i) { *(juint*)addr_at(offset) = i; }
+++  void set_ptr_at (int offset, address ptr) { *(address*) addr_at(offset) = ptr; }
+++  void set_oop_at (int offset, oop o) { *(oop*) addr_at(offset) = o; }
+++
+++ public:
+++
+++  inline friend NativeInstruction* nativeInstruction_at(address addr);
+++
+++  static bool maybe_cpool_ref(address instr) {
+++    return is_auipc_at(instr);
+++  }
+++
+++  bool is_membar() {
+++    return (uint_at(0) & 0x7f) == 0b1111 && extract_funct3(addr_at(0)) == 0;
+++  }
+++};
+++
+++inline NativeInstruction* nativeInstruction_at(address addr) {
+++  return (NativeInstruction*)addr;
+++}
+++
+++// The natural type of a RISC-V instruction is uint32_t
+++inline NativeInstruction* nativeInstruction_at(uint32_t *addr) {
+++  return (NativeInstruction*)addr;
+++}
+++
+++inline NativeCall* nativeCall_at(address addr);
+++// The NativeCall is an abstraction for accessing/manipulating native
+++// call instructions (used to manipulate inline caches, primitive &
+++// DSO calls, etc.).
+++
+++class NativeCall: public NativeInstruction {
+++ public:
+++  enum RISCV_specific_constants {
+++    instruction_size = 4,
+++    instruction_offset = 0,
+++    displacement_offset = 0,
+++    return_address_offset = 4
+++  };
+++
+++  address instruction_address() const { return addr_at(instruction_offset); }
+++  address next_instruction_address() const { return addr_at(return_address_offset); }
+++  address return_address() const { return addr_at(return_address_offset); }
+++  address destination() const;
+++
+++  void set_destination(address dest) {
+++    assert(is_jal(), "Should be jal instruction!");
+++    intptr_t offset = (intptr_t)(dest - instruction_address());
+++    assert((offset & 0x1) == 0, "bad alignment");
+++    assert(Assembler::is_simm21(offset), "encoding constraint");
+++    unsigned int insn = 0b1101111; // jal
+++    address pInsn = (address)(&insn);
+++    Assembler::patch(pInsn, 31, 31, (offset >> 20) & 0x1);
+++    Assembler::patch(pInsn, 30, 21, (offset >> 1) & 0x3ff);
+++    Assembler::patch(pInsn, 20, 20, (offset >> 11) & 0x1);
+++    Assembler::patch(pInsn, 19, 12, (offset >> 12) & 0xff);
+++    Assembler::patch(pInsn, 11, 7, ra->encoding()); // Rd must be x1, need ra
+++    set_int_at(displacement_offset, insn);
+++  }
+++
+++  void verify_alignment() {} // do nothing on riscv
+++  void verify();
+++  void print();
+++
+++  // Creation
+++  inline friend NativeCall* nativeCall_at(address addr);
+++  inline friend NativeCall* nativeCall_before(address return_address);
+++
+++  static bool is_call_before(address return_address) {
+++    return is_call_at(return_address - NativeCall::return_address_offset);
+++  }
+++
+++  // MT-safe patching of a call instruction.
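+++  // (Unused on RISC-V: the definition in nativeInst_riscv.cpp calls
+++  // Unimplemented().)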
+++ static void insert(address code_pos, address entry); +++ +++ static void replace_mt_safe(address instr_addr, address code_buffer); +++ +++ // Similar to replace_mt_safe, but just changes the destination. The +++ // important thing is that free-running threads are able to execute +++ // this call instruction at all times. If the call is an immediate BL +++ // instruction we can simply rely on atomicity of 32-bit writes to +++ // make sure other threads will see no intermediate states. +++ +++ // We cannot rely on locks here, since the free-running threads must run at +++ // full speed. +++ // +++ // Used in the runtime linkage of calls; see class CompiledIC. +++ // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.) +++ +++ // The parameter assert_lock disables the assertion during code generation. +++ void set_destination_mt_safe(address dest, bool assert_lock = true); +++ +++ address get_trampoline(); +++}; +++ +++inline NativeCall* nativeCall_at(address addr) { +++ assert_cond(addr != NULL); +++ NativeCall* call = (NativeCall*)(addr - NativeCall::instruction_offset); +++ DEBUG_ONLY(call->verify()); +++ return call; +++} +++ +++inline NativeCall* nativeCall_before(address return_address) { +++ assert_cond(return_address != NULL); +++ NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset); +++ DEBUG_ONLY(call->verify()); +++ return call; +++} +++ +++// An interface for accessing/manipulating native mov reg, imm instructions. +++// (used to manipulate inlined 64-bit data calls, etc.) +++class NativeMovConstReg: public NativeInstruction { +++ public: +++ enum RISCV_specific_constants { +++ movptr_instruction_size = 6 * NativeInstruction::instruction_size, // lui, addi, slli, addi, slli, addi. See movptr(). +++ load_pc_relative_instruction_size = 2 * NativeInstruction::instruction_size, // auipc, ld +++ instruction_offset = 0, +++ displacement_offset = 0 +++ }; +++ +++ address instruction_address() const { return addr_at(instruction_offset); } +++ address next_instruction_address() const { +++ // if the instruction at 5 * instruction_size is addi, +++ // it means a lui + addi + slli + addi + slli + addi instruction sequence, +++ // and the next instruction address should be addr_at(6 * instruction_size). 
+++ // However, when the instruction at 5 * instruction_size isn't addi, +++ // the next instruction address should be addr_at(5 * instruction_size) +++ if (nativeInstruction_at(instruction_address())->is_movptr()) { +++ if (is_addi_at(addr_at(movptr_instruction_size - NativeInstruction::instruction_size))) { +++ // Assume: lui, addi, slli, addi, slli, addi +++ return addr_at(movptr_instruction_size); +++ } else { +++ // Assume: lui, addi, slli, addi, slli +++ return addr_at(movptr_instruction_size - NativeInstruction::instruction_size); +++ } +++ } else if (is_load_pc_relative_at(instruction_address())) { +++ // Assume: auipc, ld +++ return addr_at(load_pc_relative_instruction_size); +++ } +++ guarantee(false, "Unknown instruction in NativeMovConstReg"); +++ return NULL; +++ } +++ +++ intptr_t data() const; +++ void set_data(intptr_t x); +++ +++ void flush() { +++ if (!maybe_cpool_ref(instruction_address())) { +++ ICache::invalidate_range(instruction_address(), movptr_instruction_size); +++ } +++ } +++ +++ void verify(); +++ void print(); +++ +++ // Creation +++ inline friend NativeMovConstReg* nativeMovConstReg_at(address addr); +++ inline friend NativeMovConstReg* nativeMovConstReg_before(address addr); +++}; +++ +++inline NativeMovConstReg* nativeMovConstReg_at(address addr) { +++ assert_cond(addr != NULL); +++ NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_offset); +++ DEBUG_ONLY(test->verify()); +++ return test; +++} +++ +++inline NativeMovConstReg* nativeMovConstReg_before(address addr) { +++ assert_cond(addr != NULL); +++ NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset); +++ DEBUG_ONLY(test->verify()); +++ return test; +++} +++ +++// RISCV should not use C1 runtime patching, but still implement +++// NativeMovRegMem to keep some compilers happy. 
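+++// (Its offset(), set_offset() and verify() accessors all call
+++// Unimplemented(); see nativeInst_riscv.cpp.)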
+++class NativeMovRegMem: public NativeInstruction {
+++ public:
+++  enum RISCV_specific_constants {
+++    instruction_size = NativeInstruction::instruction_size,
+++    instruction_offset = 0,
+++    data_offset = 0,
+++    next_instruction_offset = NativeInstruction::instruction_size
+++  };
+++
+++  int instruction_start() const { return instruction_offset; }
+++
+++  address instruction_address() const { return addr_at(instruction_offset); }
+++
+++  int num_bytes_to_end_of_patch() const { return instruction_offset + instruction_size; }
+++
+++  int offset() const;
+++
+++  void set_offset(int x);
+++
+++  void add_offset_in_bytes(int add_offset) {
+++    set_offset(offset() + add_offset);
+++  }
+++
+++  void verify();
+++  void print();
+++
+++ private:
+++  inline friend NativeMovRegMem* nativeMovRegMem_at(address addr);
+++};
+++
+++inline NativeMovRegMem* nativeMovRegMem_at(address addr) {
+++  NativeMovRegMem* test = (NativeMovRegMem*)(addr - NativeMovRegMem::instruction_offset);
+++  DEBUG_ONLY(test->verify());
+++  return test;
+++}
+++
+++class NativeJump: public NativeInstruction {
+++ public:
+++  enum RISCV_specific_constants {
+++    instruction_size = NativeInstruction::instruction_size,
+++    instruction_offset = 0,
+++    data_offset = 0,
+++    next_instruction_offset = NativeInstruction::instruction_size
+++  };
+++
+++  address instruction_address() const { return addr_at(instruction_offset); }
+++  address next_instruction_address() const { return addr_at(instruction_size); }
+++  address jump_destination() const;
+++  void set_jump_destination(address dest);
+++
+++  // Creation
+++  inline friend NativeJump* nativeJump_at(address address);
+++
+++  void verify();
+++
+++  // Insertion of native jump instruction
+++  static void insert(address code_pos, address entry);
+++  // MT-safe insertion of native jump at verified method entry
+++  static void check_verified_entry_alignment(address entry, address verified_entry);
+++  static void patch_verified_entry(address entry, address verified_entry, address dest);
+++};
+++
+++inline NativeJump* nativeJump_at(address addr) {
+++  NativeJump* jump = (NativeJump*)(addr - NativeJump::instruction_offset);
+++  DEBUG_ONLY(jump->verify());
+++  return jump;
+++}
+++
+++class NativeGeneralJump: public NativeJump {
+++public:
+++  enum RISCV_specific_constants {
+++    instruction_size = 6 * NativeInstruction::instruction_size, // lui, addi, slli, addi, slli, jalr
+++    instruction_offset = 0,
+++    data_offset = 0,
+++    next_instruction_offset = 6 * NativeInstruction::instruction_size // lui, addi, slli, addi, slli, jalr
+++  };
+++
+++  address jump_destination() const;
+++
+++  static void insert_unconditional(address code_pos, address entry);
+++  static void replace_mt_safe(address instr_addr, address code_buffer);
+++};
+++
+++inline NativeGeneralJump* nativeGeneralJump_at(address addr) {
+++  assert_cond(addr != NULL);
+++  NativeGeneralJump* jump = (NativeGeneralJump*)(addr);
+++  debug_only(jump->verify();)
+++  return jump;
+++}
+++
+++class NativeIllegalInstruction: public NativeInstruction {
+++ public:
+++  // Insert illegal opcode at a specific address
+++  static void insert(address code_pos);
+++};
+++
+++inline bool NativeInstruction::is_nop() {
+++  uint32_t insn = *(uint32_t*)addr_at(0);
+++  return insn == 0x13;
+++}
+++
+++inline bool NativeInstruction::is_jump_or_nop() {
+++  return is_nop() || is_jump();
+++}
+++
+++// Call trampoline stubs.
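+++// A stub is emitted as
+++//   auipc t0, 0
+++//   ld    t0, data_offset(t0)
+++//   jr    t0
+++//   <64-bit target address>
+++// so retargeting a far call only requires rewriting the trailing data
+++// word (see set_destination).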
+++class NativeCallTrampolineStub : public NativeInstruction {
+++ public:
+++
+++  enum RISCV_specific_constants {
+++    // Refer to function emit_trampoline_stub.
+++    instruction_size = 3 * NativeInstruction::instruction_size + wordSize, // auipc + ld + jr + target address
+++    data_offset = 3 * NativeInstruction::instruction_size, // auipc + ld + jr
+++  };
+++
+++  address destination(nmethod *nm = NULL) const;
+++  void set_destination(address new_destination);
+++  ptrdiff_t destination_offset() const;
+++};
+++
+++inline bool is_NativeCallTrampolineStub_at(address addr) {
+++  // Ensure that the stub is exactly
+++  //   ld   t0, L--->auipc + ld
+++  //   jr   t0
+++  // L:
+++
+++  // check inst + register + imm
+++  // 1). check the instructions: auipc + ld + jalr
+++  // 2). check if auipc[11:7] == t0, ld[11:7] == t0, ld[19:15] == t0 and jalr[19:15] == t0
+++  // 3). check if the offset in ld[31:20] equals the data_offset
+++  assert_cond(addr != NULL);
+++  const int instr_size = NativeInstruction::instruction_size;
+++  if (NativeInstruction::is_auipc_at(addr) &&
+++      NativeInstruction::is_ld_at(addr + instr_size) &&
+++      NativeInstruction::is_jalr_at(addr + 2 * instr_size) &&
+++      (NativeInstruction::extract_rd(addr) == x5) &&
+++      (NativeInstruction::extract_rd(addr + instr_size) == x5) &&
+++      (NativeInstruction::extract_rs1(addr + instr_size) == x5) &&
+++      (NativeInstruction::extract_rs1(addr + 2 * instr_size) == x5) &&
+++      (Assembler::extract(((unsigned*)addr)[1], 31, 20) == NativeCallTrampolineStub::data_offset)) {
+++    return true;
+++  }
+++  return false;
+++}
+++
+++inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) {
+++  assert_cond(addr != NULL);
+++  assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found");
+++  return (NativeCallTrampolineStub*)addr;
+++}
+++
+++class NativeMembar : public NativeInstruction {
+++public:
+++  uint32_t get_kind();
+++  void set_kind(uint32_t order_kind);
+++};
+++
+++inline NativeMembar *NativeMembar_at(address addr) {
+++  assert_cond(addr != NULL);
+++  assert(nativeInstruction_at(addr)->is_membar(), "no membar found");
+++  return (NativeMembar*)addr;
+++}
+++
+++#endif // CPU_RISCV_NATIVEINST_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/registerMap_riscv.cpp
++@@ -0,0 +1,45 @@
+++/*
+++ * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#include "precompiled.hpp"
+++#include "runtime/registerMap.hpp"
+++#include "vmreg_riscv.inline.hpp"
+++
+++address RegisterMap::pd_location(VMReg base_reg, int slot_idx) const {
+++  if (base_reg->is_VectorRegister()) {
+++    assert(base_reg->is_concrete(), "must pass base reg");
+++    int base_reg_enc = (base_reg->value() - ConcreteRegisterImpl::max_fpr) /
+++                       VectorRegisterImpl::max_slots_per_register;
+++    intptr_t offset_in_bytes = slot_idx * VMRegImpl::stack_slot_size;
+++    address base_location = location(base_reg);
+++    if (base_location != NULL) {
+++      return base_location + offset_in_bytes;
+++    } else {
+++      return NULL;
+++    }
+++  } else {
+++    return location(base_reg->next(slot_idx));
+++  }
+++}
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/registerMap_riscv.hpp
++@@ -0,0 +1,43 @@
+++/*
+++ * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef CPU_RISCV_REGISTERMAP_RISCV_HPP
+++#define CPU_RISCV_REGISTERMAP_RISCV_HPP
+++
+++// machine-dependent implementation for register maps
+++  friend class frame;
+++
+++ private:
+++  // This is the hook for finding a register in a "well-known" location,
+++  // such as a register block of a predetermined format.
+++  address pd_location(VMReg reg) const { return NULL; }
+++  address pd_location(VMReg base_reg, int slot_idx) const;
+++
+++  // no PD state to clear or copy:
+++  void pd_clear() {}
+++  void pd_initialize() {}
+++  void pd_initialize_from(const RegisterMap* map) {}
+++
+++#endif // CPU_RISCV_REGISTERMAP_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/register_definitions_riscv.cpp
++@@ -0,0 +1,193 @@
+++/*
+++ * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/assembler.hpp" +++#include "asm/macroAssembler.inline.hpp" +++#include "asm/register.hpp" +++#include "interp_masm_riscv.hpp" +++#include "register_riscv.hpp" +++ +++REGISTER_DEFINITION(Register, noreg); +++ +++REGISTER_DEFINITION(Register, x0); +++REGISTER_DEFINITION(Register, x1); +++REGISTER_DEFINITION(Register, x2); +++REGISTER_DEFINITION(Register, x3); +++REGISTER_DEFINITION(Register, x4); +++REGISTER_DEFINITION(Register, x5); +++REGISTER_DEFINITION(Register, x6); +++REGISTER_DEFINITION(Register, x7); +++REGISTER_DEFINITION(Register, x8); +++REGISTER_DEFINITION(Register, x9); +++REGISTER_DEFINITION(Register, x10); +++REGISTER_DEFINITION(Register, x11); +++REGISTER_DEFINITION(Register, x12); +++REGISTER_DEFINITION(Register, x13); +++REGISTER_DEFINITION(Register, x14); +++REGISTER_DEFINITION(Register, x15); +++REGISTER_DEFINITION(Register, x16); +++REGISTER_DEFINITION(Register, x17); +++REGISTER_DEFINITION(Register, x18); +++REGISTER_DEFINITION(Register, x19); +++REGISTER_DEFINITION(Register, x20); +++REGISTER_DEFINITION(Register, x21); +++REGISTER_DEFINITION(Register, x22); +++REGISTER_DEFINITION(Register, x23); +++REGISTER_DEFINITION(Register, x24); +++REGISTER_DEFINITION(Register, x25); +++REGISTER_DEFINITION(Register, x26); +++REGISTER_DEFINITION(Register, x27); +++REGISTER_DEFINITION(Register, x28); +++REGISTER_DEFINITION(Register, x29); +++REGISTER_DEFINITION(Register, x30); +++REGISTER_DEFINITION(Register, x31); +++ +++REGISTER_DEFINITION(FloatRegister, fnoreg); +++ +++REGISTER_DEFINITION(FloatRegister, f0); +++REGISTER_DEFINITION(FloatRegister, f1); +++REGISTER_DEFINITION(FloatRegister, f2); +++REGISTER_DEFINITION(FloatRegister, f3); +++REGISTER_DEFINITION(FloatRegister, f4); +++REGISTER_DEFINITION(FloatRegister, f5); +++REGISTER_DEFINITION(FloatRegister, f6); +++REGISTER_DEFINITION(FloatRegister, f7); +++REGISTER_DEFINITION(FloatRegister, f8); +++REGISTER_DEFINITION(FloatRegister, f9); +++REGISTER_DEFINITION(FloatRegister, f10); +++REGISTER_DEFINITION(FloatRegister, f11); +++REGISTER_DEFINITION(FloatRegister, f12); +++REGISTER_DEFINITION(FloatRegister, f13); +++REGISTER_DEFINITION(FloatRegister, f14); +++REGISTER_DEFINITION(FloatRegister, f15); +++REGISTER_DEFINITION(FloatRegister, f16); +++REGISTER_DEFINITION(FloatRegister, f17); +++REGISTER_DEFINITION(FloatRegister, f18); +++REGISTER_DEFINITION(FloatRegister, f19); +++REGISTER_DEFINITION(FloatRegister, f20); +++REGISTER_DEFINITION(FloatRegister, f21); +++REGISTER_DEFINITION(FloatRegister, f22); +++REGISTER_DEFINITION(FloatRegister, f23); +++REGISTER_DEFINITION(FloatRegister, f24); +++REGISTER_DEFINITION(FloatRegister, f25); +++REGISTER_DEFINITION(FloatRegister, f26); +++REGISTER_DEFINITION(FloatRegister, f27); +++REGISTER_DEFINITION(FloatRegister, f28); +++REGISTER_DEFINITION(FloatRegister, f29); +++REGISTER_DEFINITION(FloatRegister, f30); +++REGISTER_DEFINITION(FloatRegister, f31); +++ 
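The REGISTER_DEFINITION entries above and below give out-of-line definitions for the constants declared with CONSTANT_REGISTER_DECLARATION in register_riscv.hpp; a register value is nothing more than its encoding stored in a pointer-sized integer. A minimal sketch of that convention (illustrative, assuming only what as_Register in register_riscv.hpp shows):

    #include <cassert>
    #include <cstdint>

    typedef struct RegisterImpl* Register;   // opaque tag type, as in the port

    inline Register as_Register(int encoding) {
      return (Register)(intptr_t)encoding;   // no object is allocated; the pointer value IS the encoding
    }

    int main() {
      Register r = as_Register(5);           // x5, i.e. the scratch register t0
      assert((intptr_t)r == 5);              // encoding_nocheck() recovers the number
      return 0;
    }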
+++REGISTER_DEFINITION(VectorRegister, vnoreg); +++ +++REGISTER_DEFINITION(VectorRegister, v0); +++REGISTER_DEFINITION(VectorRegister, v1); +++REGISTER_DEFINITION(VectorRegister, v2); +++REGISTER_DEFINITION(VectorRegister, v3); +++REGISTER_DEFINITION(VectorRegister, v4); +++REGISTER_DEFINITION(VectorRegister, v5); +++REGISTER_DEFINITION(VectorRegister, v6); +++REGISTER_DEFINITION(VectorRegister, v7); +++REGISTER_DEFINITION(VectorRegister, v8); +++REGISTER_DEFINITION(VectorRegister, v9); +++REGISTER_DEFINITION(VectorRegister, v10); +++REGISTER_DEFINITION(VectorRegister, v11); +++REGISTER_DEFINITION(VectorRegister, v12); +++REGISTER_DEFINITION(VectorRegister, v13); +++REGISTER_DEFINITION(VectorRegister, v14); +++REGISTER_DEFINITION(VectorRegister, v15); +++REGISTER_DEFINITION(VectorRegister, v16); +++REGISTER_DEFINITION(VectorRegister, v17); +++REGISTER_DEFINITION(VectorRegister, v18); +++REGISTER_DEFINITION(VectorRegister, v19); +++REGISTER_DEFINITION(VectorRegister, v20); +++REGISTER_DEFINITION(VectorRegister, v21); +++REGISTER_DEFINITION(VectorRegister, v22); +++REGISTER_DEFINITION(VectorRegister, v23); +++REGISTER_DEFINITION(VectorRegister, v24); +++REGISTER_DEFINITION(VectorRegister, v25); +++REGISTER_DEFINITION(VectorRegister, v26); +++REGISTER_DEFINITION(VectorRegister, v27); +++REGISTER_DEFINITION(VectorRegister, v28); +++REGISTER_DEFINITION(VectorRegister, v29); +++REGISTER_DEFINITION(VectorRegister, v30); +++REGISTER_DEFINITION(VectorRegister, v31); +++ +++REGISTER_DEFINITION(Register, c_rarg0); +++REGISTER_DEFINITION(Register, c_rarg1); +++REGISTER_DEFINITION(Register, c_rarg2); +++REGISTER_DEFINITION(Register, c_rarg3); +++REGISTER_DEFINITION(Register, c_rarg4); +++REGISTER_DEFINITION(Register, c_rarg5); +++REGISTER_DEFINITION(Register, c_rarg6); +++REGISTER_DEFINITION(Register, c_rarg7); +++ +++REGISTER_DEFINITION(FloatRegister, c_farg0); +++REGISTER_DEFINITION(FloatRegister, c_farg1); +++REGISTER_DEFINITION(FloatRegister, c_farg2); +++REGISTER_DEFINITION(FloatRegister, c_farg3); +++REGISTER_DEFINITION(FloatRegister, c_farg4); +++REGISTER_DEFINITION(FloatRegister, c_farg5); +++REGISTER_DEFINITION(FloatRegister, c_farg6); +++REGISTER_DEFINITION(FloatRegister, c_farg7); +++ +++REGISTER_DEFINITION(Register, j_rarg0); +++REGISTER_DEFINITION(Register, j_rarg1); +++REGISTER_DEFINITION(Register, j_rarg2); +++REGISTER_DEFINITION(Register, j_rarg3); +++REGISTER_DEFINITION(Register, j_rarg4); +++REGISTER_DEFINITION(Register, j_rarg5); +++REGISTER_DEFINITION(Register, j_rarg6); +++REGISTER_DEFINITION(Register, j_rarg7); +++ +++REGISTER_DEFINITION(FloatRegister, j_farg0); +++REGISTER_DEFINITION(FloatRegister, j_farg1); +++REGISTER_DEFINITION(FloatRegister, j_farg2); +++REGISTER_DEFINITION(FloatRegister, j_farg3); +++REGISTER_DEFINITION(FloatRegister, j_farg4); +++REGISTER_DEFINITION(FloatRegister, j_farg5); +++REGISTER_DEFINITION(FloatRegister, j_farg6); +++REGISTER_DEFINITION(FloatRegister, j_farg7); +++ +++REGISTER_DEFINITION(Register, zr); +++REGISTER_DEFINITION(Register, gp); +++REGISTER_DEFINITION(Register, tp); +++REGISTER_DEFINITION(Register, xmethod); +++REGISTER_DEFINITION(Register, ra); +++REGISTER_DEFINITION(Register, sp); +++REGISTER_DEFINITION(Register, fp); +++REGISTER_DEFINITION(Register, xheapbase); +++REGISTER_DEFINITION(Register, xcpool); +++REGISTER_DEFINITION(Register, xmonitors); +++REGISTER_DEFINITION(Register, xlocals); +++REGISTER_DEFINITION(Register, xthread); +++REGISTER_DEFINITION(Register, xbcp); +++REGISTER_DEFINITION(Register, xdispatch); 
+++REGISTER_DEFINITION(Register, esp); +++ +++REGISTER_DEFINITION(Register, t0); +++REGISTER_DEFINITION(Register, t1); +++REGISTER_DEFINITION(Register, t2); ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/register_riscv.cpp ++@@ -0,0 +1,69 @@ +++/* +++ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "register_riscv.hpp" +++ +++const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers * +++ RegisterImpl::max_slots_per_register; +++ +++const int ConcreteRegisterImpl::max_fpr = +++ ConcreteRegisterImpl::max_gpr + +++ FloatRegisterImpl::number_of_registers * FloatRegisterImpl::max_slots_per_register; +++ +++const int ConcreteRegisterImpl::max_vpr = +++ ConcreteRegisterImpl::max_fpr + +++ VectorRegisterImpl::number_of_registers * VectorRegisterImpl::max_slots_per_register; +++ +++ +++const char* RegisterImpl::name() const { +++ static const char *const names[number_of_registers] = { +++ "zr", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "fp", "x9", +++ "c_rarg0", "c_rarg1", "c_rarg2", "c_rarg3", "c_rarg4", "c_rarg5", "c_rarg6", "c_rarg7", +++ "x18", "x19", "esp", "xdispatch", "xbcp", "xthread", "xlocals", +++ "xmonitors", "xcpool", "xheapbase", "x28", "x29", "x30", "xmethod" +++ }; +++ return is_valid() ? names[encoding()] : "noreg"; +++} +++ +++const char* FloatRegisterImpl::name() const { +++ static const char *const names[number_of_registers] = { +++ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", +++ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", +++ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", +++ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31" +++ }; +++ return is_valid() ? names[encoding()] : "noreg"; +++} +++ +++const char* VectorRegisterImpl::name() const { +++ static const char *const names[number_of_registers] = { +++ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", +++ "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", +++ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", +++ "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" +++ }; +++ return is_valid() ? names[encoding()] : "noreg"; +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/register_riscv.hpp ++@@ -0,0 +1,385 @@ +++/* +++ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. 
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_REGISTER_RISCV_HPP +++#define CPU_RISCV_REGISTER_RISCV_HPP +++ +++#include "asm/register.hpp" +++ +++#define CSR_FFLAGS 0x001 // Floating-Point Accrued Exceptions. +++#define CSR_FRM 0x002 // Floating-Point Dynamic Rounding Mode. +++#define CSR_FCSR 0x003 // Floating-Point Control and Status Register (frm + fflags). +++#define CSR_VSTART 0x008 // Vector start position +++#define CSR_VXSAT 0x009 // Fixed-Point Saturate Flag +++#define CSR_VXRM 0x00A // Fixed-Point Rounding Mode +++#define CSR_VCSR 0x00F // Vector control and status register +++#define CSR_VL 0xC20 // Vector length +++#define CSR_VTYPE 0xC21 // Vector data type register +++#define CSR_VLENB 0xC22 // VLEN/8 (vector register length in bytes) +++#define CSR_CYCLE 0xc00 // Cycle counter for RDCYCLE instruction. +++#define CSR_TIME 0xc01 // Timer for RDTIME instruction. +++#define CSR_INSTRET 0xc02 // Instructions-retired counter for RDINSTRET instruction. +++ +++class VMRegImpl; +++typedef VMRegImpl* VMReg; +++ +++// Use Register as shortcut +++class RegisterImpl; +++typedef RegisterImpl* Register; +++ +++inline Register as_Register(int encoding) { +++ return (Register)(intptr_t) encoding; +++} +++ +++class RegisterImpl: public AbstractRegisterImpl { +++ +++ public: +++ enum { +++ number_of_registers = 32, +++ max_slots_per_register = 2, +++ +++ // integer registers x8 - x15 and floating-point registers f8 - f15 are allocatable +++ // for compressed instructions. See Table 17.2 in spec. +++ compressed_register_base = 8, +++ compressed_register_top = 15, +++ }; +++ +++ // derived registers, offsets, and addresses +++ Register successor() const { return as_Register(encoding() + 1); } +++ +++ // construction +++ inline friend Register as_Register(int encoding); +++ +++ VMReg as_VMReg(); +++ +++ // accessors +++ int encoding() const { assert(is_valid(), "invalid register"); return encoding_nocheck(); } +++ bool is_valid() const { return 0 <= encoding_nocheck() && encoding_nocheck() < number_of_registers; } +++ const char* name() const; +++ int encoding_nocheck() const { return (intptr_t)this; } +++ +++ // Return the bit which represents this register. This is intended +++ // to be ORed into a bitmask: for usage see class AbstractRegSet below. +++ unsigned long bit(bool should_set = true) const { return should_set ? 
1 << encoding() : 0; } +++ +++ // for rvc +++ int compressed_encoding() const { +++ assert(is_compressed_valid(), "invalid compressed register"); +++ return encoding() - compressed_register_base; +++ } +++ +++ int compressed_encoding_nocheck() const { +++ return encoding_nocheck() - compressed_register_base; +++ } +++ +++ bool is_compressed_valid() const { +++ return encoding_nocheck() >= compressed_register_base && +++ encoding_nocheck() <= compressed_register_top; +++ } +++}; +++ +++// The integer registers of the RISCV architecture +++ +++CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1)); +++ +++CONSTANT_REGISTER_DECLARATION(Register, x0, (0)); +++CONSTANT_REGISTER_DECLARATION(Register, x1, (1)); +++CONSTANT_REGISTER_DECLARATION(Register, x2, (2)); +++CONSTANT_REGISTER_DECLARATION(Register, x3, (3)); +++CONSTANT_REGISTER_DECLARATION(Register, x4, (4)); +++CONSTANT_REGISTER_DECLARATION(Register, x5, (5)); +++CONSTANT_REGISTER_DECLARATION(Register, x6, (6)); +++CONSTANT_REGISTER_DECLARATION(Register, x7, (7)); +++CONSTANT_REGISTER_DECLARATION(Register, x8, (8)); +++CONSTANT_REGISTER_DECLARATION(Register, x9, (9)); +++CONSTANT_REGISTER_DECLARATION(Register, x10, (10)); +++CONSTANT_REGISTER_DECLARATION(Register, x11, (11)); +++CONSTANT_REGISTER_DECLARATION(Register, x12, (12)); +++CONSTANT_REGISTER_DECLARATION(Register, x13, (13)); +++CONSTANT_REGISTER_DECLARATION(Register, x14, (14)); +++CONSTANT_REGISTER_DECLARATION(Register, x15, (15)); +++CONSTANT_REGISTER_DECLARATION(Register, x16, (16)); +++CONSTANT_REGISTER_DECLARATION(Register, x17, (17)); +++CONSTANT_REGISTER_DECLARATION(Register, x18, (18)); +++CONSTANT_REGISTER_DECLARATION(Register, x19, (19)); +++CONSTANT_REGISTER_DECLARATION(Register, x20, (20)); +++CONSTANT_REGISTER_DECLARATION(Register, x21, (21)); +++CONSTANT_REGISTER_DECLARATION(Register, x22, (22)); +++CONSTANT_REGISTER_DECLARATION(Register, x23, (23)); +++CONSTANT_REGISTER_DECLARATION(Register, x24, (24)); +++CONSTANT_REGISTER_DECLARATION(Register, x25, (25)); +++CONSTANT_REGISTER_DECLARATION(Register, x26, (26)); +++CONSTANT_REGISTER_DECLARATION(Register, x27, (27)); +++CONSTANT_REGISTER_DECLARATION(Register, x28, (28)); +++CONSTANT_REGISTER_DECLARATION(Register, x29, (29)); +++CONSTANT_REGISTER_DECLARATION(Register, x30, (30)); +++CONSTANT_REGISTER_DECLARATION(Register, x31, (31)); +++ +++// Use FloatRegister as shortcut +++class FloatRegisterImpl; +++typedef FloatRegisterImpl* FloatRegister; +++ +++inline FloatRegister as_FloatRegister(int encoding) { +++ return (FloatRegister)(intptr_t) encoding; +++} +++ +++// The implementation of floating point registers for the architecture +++class FloatRegisterImpl: public AbstractRegisterImpl { +++ +++ public: +++ enum { +++ number_of_registers = 32, +++ max_slots_per_register = 2, +++ +++ // float registers in the range of [f8~f15] correspond to RVC. Please see Table 16.2 in spec. 
+++ compressed_register_base = 8, +++ compressed_register_top = 15, +++ }; +++ +++ // construction +++ inline friend FloatRegister as_FloatRegister(int encoding); +++ +++ VMReg as_VMReg(); +++ +++ // derived registers, offsets, and addresses +++ FloatRegister successor() const { +++ return as_FloatRegister((encoding() + 1)); +++ } +++ +++ // accessors +++ int encoding() const { assert(is_valid(), "invalid register"); return encoding_nocheck(); } +++ int encoding_nocheck() const { return (intptr_t)this; } +++ int is_valid() const { return 0 <= encoding_nocheck() && encoding_nocheck() < number_of_registers; } +++ const char* name() const; +++ +++ // for rvc +++ int compressed_encoding() const { +++ assert(is_compressed_valid(), "invalid compressed register"); +++ return encoding() - compressed_register_base; +++ } +++ +++ int compressed_encoding_nocheck() const { +++ return encoding_nocheck() - compressed_register_base; +++ } +++ +++ bool is_compressed_valid() const { +++ return encoding_nocheck() >= compressed_register_base && +++ encoding_nocheck() <= compressed_register_top; +++ } +++}; +++ +++// The float registers of the RISCV architecture +++ +++CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg , (-1)); +++ +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f0 , ( 0)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f1 , ( 1)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f2 , ( 2)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f3 , ( 3)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f4 , ( 4)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f5 , ( 5)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f6 , ( 6)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f7 , ( 7)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f8 , ( 8)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f9 , ( 9)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f10 , (10)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f11 , (11)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f12 , (12)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f13 , (13)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f14 , (14)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f15 , (15)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f16 , (16)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f17 , (17)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f18 , (18)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f19 , (19)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f20 , (20)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f21 , (21)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f22 , (22)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f23 , (23)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f24 , (24)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f25 , (25)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f26 , (26)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f27 , (27)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f28 , (28)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f29 , (29)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f30 , (30)); +++CONSTANT_REGISTER_DECLARATION(FloatRegister, f31 , (31)); +++ +++// Use VectorRegister as shortcut +++class VectorRegisterImpl; +++typedef VectorRegisterImpl* VectorRegister; +++ +++inline VectorRegister as_VectorRegister(int encoding) { +++ return (VectorRegister)(intptr_t) encoding; +++} +++ +++// The implementation of vector registers for RVV +++class VectorRegisterImpl: public AbstractRegisterImpl { +++ +++ public: +++ enum { +++ 
number_of_registers = 32,
+++    max_slots_per_register = 4
+++  };
+++
+++  // construction
+++  inline friend VectorRegister as_VectorRegister(int encoding);
+++
+++  VMReg as_VMReg();
+++
+++  // derived registers, offsets, and addresses
+++  VectorRegister successor() const { return as_VectorRegister(encoding() + 1); }
+++
+++  // accessors
+++  int encoding() const { assert(is_valid(), "invalid register"); return encoding_nocheck(); }
+++  int encoding_nocheck() const { return (intptr_t)this; }
+++  bool is_valid() const { return 0 <= encoding_nocheck() && encoding_nocheck() < number_of_registers; }
+++  const char* name() const;
+++
+++};
+++
+++// The vector registers of RVV
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, vnoreg , (-1));
+++
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v0 , ( 0));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v1 , ( 1));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v2 , ( 2));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v3 , ( 3));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v4 , ( 4));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v5 , ( 5));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v6 , ( 6));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v7 , ( 7));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v8 , ( 8));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v9 , ( 9));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v10 , (10));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v11 , (11));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v12 , (12));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v13 , (13));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v14 , (14));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v15 , (15));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v16 , (16));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v17 , (17));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v18 , (18));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v19 , (19));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v20 , (20));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v21 , (21));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v22 , (22));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v23 , (23));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v24 , (24));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v25 , (25));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v26 , (26));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v27 , (27));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v28 , (28));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v29 , (29));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v30 , (30));
+++CONSTANT_REGISTER_DECLARATION(VectorRegister, v31 , (31));
+++
+++
+++// Need to know the total number of registers of all sorts for SharedInfo.
+++// Define a class that exports it.
+++class ConcreteRegisterImpl : public AbstractRegisterImpl {
+++ public:
+++  enum {
+++    // A big enough number for C2: all the registers plus flags
+++    // This number must be large enough to cover REG_COUNT (defined by c2) registers.
+++    // There is no requirement that any ordering here matches any ordering c2 gives
+++    // its optoregs.
+++ +++ number_of_registers = (RegisterImpl::max_slots_per_register * RegisterImpl::number_of_registers + +++ FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers + +++ VectorRegisterImpl::max_slots_per_register * VectorRegisterImpl::number_of_registers) +++ }; +++ +++ // added to make it compile +++ static const int max_gpr; +++ static const int max_fpr; +++ static const int max_vpr; +++}; +++ +++// A set of registers +++template +++class AbstractRegSet { +++ uint32_t _bitset; +++ +++ AbstractRegSet(uint32_t bitset) : _bitset(bitset) { } +++ +++ public: +++ AbstractRegSet() : _bitset(0) { } +++ +++ AbstractRegSet(RegImpl r1) : _bitset(1 << r1->encoding()) { } +++ +++ AbstractRegSet operator+(const AbstractRegSet aSet) const { +++ AbstractRegSet result(_bitset | aSet._bitset); +++ return result; +++ } +++ +++ AbstractRegSet operator-(const AbstractRegSet aSet) const { +++ AbstractRegSet result(_bitset & ~aSet._bitset); +++ return result; +++ } +++ +++ AbstractRegSet &operator+=(const AbstractRegSet aSet) { +++ *this = *this + aSet; +++ return *this; +++ } +++ +++ AbstractRegSet &operator-=(const AbstractRegSet aSet) { +++ *this = *this - aSet; +++ return *this; +++ } +++ +++ static AbstractRegSet of(RegImpl r1) { +++ return AbstractRegSet(r1); +++ } +++ +++ static AbstractRegSet of(RegImpl r1, RegImpl r2) { +++ return of(r1) + r2; +++ } +++ +++ static AbstractRegSet of(RegImpl r1, RegImpl r2, RegImpl r3) { +++ return of(r1, r2) + r3; +++ } +++ +++ static AbstractRegSet of(RegImpl r1, RegImpl r2, RegImpl r3, RegImpl r4) { +++ return of(r1, r2, r3) + r4; +++ } +++ +++ static AbstractRegSet range(RegImpl start, RegImpl end) { +++ uint32_t bits = ~0; +++ bits <<= start->encoding(); +++ bits <<= (31 - end->encoding()); +++ bits >>= (31 - end->encoding()); +++ +++ return AbstractRegSet(bits); +++ } +++ +++ uint32_t bits() const { return _bitset; } +++}; +++ +++typedef AbstractRegSet RegSet; +++typedef AbstractRegSet FloatRegSet; +++typedef AbstractRegSet VectorRegSet; +++ +++#endif // CPU_RISCV_REGISTER_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/relocInfo_riscv.cpp ++@@ -0,0 +1,113 @@ +++/* +++ * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.hpp" +++#include "code/relocInfo.hpp" +++#include "nativeInst_riscv.hpp" +++#include "oops/oop.inline.hpp" +++#include "runtime/safepoint.hpp" +++ +++void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { +++ if (verify_only) { +++ return; +++ } +++ +++ int bytes; +++ +++ switch (type()) { +++ case relocInfo::oop_type: { +++ oop_Relocation *reloc = (oop_Relocation *)this; +++ // in movoop when BarrierSet::barrier_set()->barrier_set_nmethod() != NULL || !immediate +++ if (NativeInstruction::is_load_pc_relative_at(addr())) { +++ address constptr = (address)code()->oop_addr_at(reloc->oop_index()); +++ bytes = MacroAssembler::pd_patch_instruction_size(addr(), constptr); +++ assert(*(address*)constptr == x, "error in oop relocation"); +++ } else { +++ bytes = MacroAssembler::patch_oop(addr(), x); +++ } +++ break; +++ } +++ default: +++ bytes = MacroAssembler::pd_patch_instruction_size(addr(), x); +++ break; +++ } +++ ICache::invalidate_range(addr(), bytes); +++} +++ +++address Relocation::pd_call_destination(address orig_addr) { +++ assert(is_call(), "should be an address instruction here"); +++ if (NativeCall::is_call_at(addr())) { +++ address trampoline = nativeCall_at(addr())->get_trampoline(); +++ if (trampoline != NULL) { +++ return nativeCallTrampolineStub_at(trampoline)->destination(); +++ } +++ } +++ if (orig_addr != NULL) { +++ // the extracted address from the instructions in address orig_addr +++ address new_addr = MacroAssembler::pd_call_destination(orig_addr); +++ // If call is branch to self, don't try to relocate it, just leave it +++ // as branch to self. This happens during code generation if the code +++ // buffer expands. It will be relocated to the trampoline above once +++ // code generation is complete. +++ new_addr = (new_addr == orig_addr) ? addr() : new_addr; +++ return new_addr; +++ } +++ return MacroAssembler::pd_call_destination(addr()); +++} +++ +++void Relocation::pd_set_call_destination(address x) { +++ assert(is_call(), "should be an address instruction here"); +++ if (NativeCall::is_call_at(addr())) { +++ address trampoline = nativeCall_at(addr())->get_trampoline(); +++ if (trampoline != NULL) { +++ nativeCall_at(addr())->set_destination_mt_safe(x, /* assert_lock */false); +++ return; +++ } +++ } +++ MacroAssembler::pd_patch_instruction_size(addr(), x); +++ address pd_call = pd_call_destination(addr()); +++ assert(pd_call == x, "fail in reloc"); +++} +++ +++address* Relocation::pd_address_in_code() { +++ assert(NativeCall::is_load_pc_relative_at(addr()), "Not the expected instruction sequence!"); +++ return (address*)(MacroAssembler::target_addr_for_insn(addr())); +++} +++ +++address Relocation::pd_get_address_from_code() { +++ return MacroAssembler::pd_call_destination(addr()); +++} +++ +++void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { +++ if (NativeInstruction::maybe_cpool_ref(addr())) { +++ address old_addr = old_addr_for(addr(), src, dest); +++ MacroAssembler::pd_patch_instruction_size(addr(), MacroAssembler::target_addr_for_insn(old_addr)); +++ } +++} +++ +++void metadata_Relocation::pd_fix_value(address x) { +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/relocInfo_riscv.hpp ++@@ -0,0 +1,44 @@ +++/* +++ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. 
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef CPU_RISCV_RELOCINFO_RISCV_HPP
+++#define CPU_RISCV_RELOCINFO_RISCV_HPP
+++
+++  // machine-dependent parts of class relocInfo
+++ private:
+++  enum {
+++    // Relocations are byte-aligned.
+++    offset_unit = 1,
+++    // Must be at least 1 for RelocInfo::narrow_oop_in_const.
+++    format_width = 1
+++  };
+++
+++ public:
+++
+++  // This platform has no oops in the code that are not also
+++  // listed in the oop section.
+++  static bool mustIterateImmediateOopsInCode() { return false; }
+++
+++#endif // CPU_RISCV_RELOCINFO_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/riscv.ad
++@@ -0,0 +1,10645 @@
+++//
+++// Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+++// Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
+++// Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++//
+++// This code is free software; you can redistribute it and/or modify it
+++// under the terms of the GNU General Public License version 2 only, as
+++// published by the Free Software Foundation.
+++//
+++// This code is distributed in the hope that it will be useful, but WITHOUT
+++// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++// version 2 for more details (a copy is included in the LICENSE file that
+++// accompanied this code).
+++//
+++// You should have received a copy of the GNU General Public License version
+++// 2 along with this work; if not, write to the Free Software Foundation,
+++// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++//
+++// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++// or visit www.oracle.com if you need additional information or have any
+++// questions.
+++//
+++//
+++
+++// RISCV Architecture Description File
+++
+++//----------REGISTER DEFINITION BLOCK------------------------------------------
+++// This information is used by the matcher and the register allocator to
+++// describe individual registers and classes of registers within the target
+++// architecture.
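Before the register block, it helps to keep the slot accounting in mind: register_riscv.cpp counts two 32-bit VMReg slots per general-purpose and floating-point register and four per vector register, which is why each 64-bit register below is described as a real lower half plus a virtual _H upper half. A quick, illustrative check of those totals under the sizes from register_riscv.hpp:

    #include <cstdio>

    int main() {
      const int max_gpr = 32 * 2;            // RegisterImpl: 32 registers, 2 slots each
      const int max_fpr = max_gpr + 32 * 2;  // FloatRegisterImpl: another 2 slots each
      const int max_vpr = max_fpr + 32 * 4;  // VectorRegisterImpl: 4 slots each
      printf("max_gpr=%d max_fpr=%d max_vpr=%d\n", max_gpr, max_fpr, max_vpr);  // 64 128 256
      return 0;
    }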
+++ +++register %{ +++//----------Architecture Description Register Definitions---------------------- +++// General Registers +++// "reg_def" name ( register save type, C convention save type, +++// ideal register type, encoding ); +++// Register Save Types: +++// +++// NS = No-Save: The register allocator assumes that these registers +++// can be used without saving upon entry to the method, & +++// that they do not need to be saved at call sites. +++// +++// SOC = Save-On-Call: The register allocator assumes that these registers +++// can be used without saving upon entry to the method, +++// but that they must be saved at call sites. +++// +++// SOE = Save-On-Entry: The register allocator assumes that these registers +++// must be saved before using them upon entry to the +++// method, but they do not need to be saved at call +++// sites. +++// +++// AS = Always-Save: The register allocator assumes that these registers +++// must be saved before using them upon entry to the +++// method, & that they must be saved at call sites. +++// +++// Ideal Register Type is used to determine how to save & restore a +++// register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get +++// spilled with LoadP/StoreP. If the register supports both, use Op_RegI. +++// +++// The encoding number is the actual bit-pattern placed into the opcodes. +++ +++// We must define the 64 bit int registers in two 32 bit halves, the +++// real lower register and a virtual upper half register. upper halves +++// are used by the register allocator but are not actually supplied as +++// operands to memory ops. +++// +++// follow the C1 compiler in making registers +++// +++// x7, x9-x17, x27-x31 volatile (caller save) +++// x0-x4, x8, x23 system (no save, no allocate) +++// x5-x6 non-allocatable (so we can use them as temporary regs) +++ +++// +++// as regards Java usage. 
we don't use any callee save registers +++// because this makes it difficult to de-optimise a frame (see comment +++// in x86 implementation of Deoptimization::unwind_callee_save_values) +++// +++ +++// General Registers +++ +++reg_def R0 ( NS, NS, Op_RegI, 0, x0->as_VMReg() ); // zr +++reg_def R0_H ( NS, NS, Op_RegI, 0, x0->as_VMReg()->next() ); +++reg_def R1 ( NS, SOC, Op_RegI, 1, x1->as_VMReg() ); // ra +++reg_def R1_H ( NS, SOC, Op_RegI, 1, x1->as_VMReg()->next() ); +++reg_def R2 ( NS, SOE, Op_RegI, 2, x2->as_VMReg() ); // sp +++reg_def R2_H ( NS, SOE, Op_RegI, 2, x2->as_VMReg()->next() ); +++reg_def R3 ( NS, NS, Op_RegI, 3, x3->as_VMReg() ); // gp +++reg_def R3_H ( NS, NS, Op_RegI, 3, x3->as_VMReg()->next() ); +++reg_def R4 ( NS, NS, Op_RegI, 4, x4->as_VMReg() ); // tp +++reg_def R4_H ( NS, NS, Op_RegI, 4, x4->as_VMReg()->next() ); +++reg_def R7 ( SOC, SOC, Op_RegI, 7, x7->as_VMReg() ); +++reg_def R7_H ( SOC, SOC, Op_RegI, 7, x7->as_VMReg()->next() ); +++reg_def R8 ( NS, SOE, Op_RegI, 8, x8->as_VMReg() ); // fp +++reg_def R8_H ( NS, SOE, Op_RegI, 8, x8->as_VMReg()->next() ); +++reg_def R9 ( SOC, SOE, Op_RegI, 9, x9->as_VMReg() ); +++reg_def R9_H ( SOC, SOE, Op_RegI, 9, x9->as_VMReg()->next() ); +++reg_def R10 ( SOC, SOC, Op_RegI, 10, x10->as_VMReg() ); +++reg_def R10_H ( SOC, SOC, Op_RegI, 10, x10->as_VMReg()->next()); +++reg_def R11 ( SOC, SOC, Op_RegI, 11, x11->as_VMReg() ); +++reg_def R11_H ( SOC, SOC, Op_RegI, 11, x11->as_VMReg()->next()); +++reg_def R12 ( SOC, SOC, Op_RegI, 12, x12->as_VMReg() ); +++reg_def R12_H ( SOC, SOC, Op_RegI, 12, x12->as_VMReg()->next()); +++reg_def R13 ( SOC, SOC, Op_RegI, 13, x13->as_VMReg() ); +++reg_def R13_H ( SOC, SOC, Op_RegI, 13, x13->as_VMReg()->next()); +++reg_def R14 ( SOC, SOC, Op_RegI, 14, x14->as_VMReg() ); +++reg_def R14_H ( SOC, SOC, Op_RegI, 14, x14->as_VMReg()->next()); +++reg_def R15 ( SOC, SOC, Op_RegI, 15, x15->as_VMReg() ); +++reg_def R15_H ( SOC, SOC, Op_RegI, 15, x15->as_VMReg()->next()); +++reg_def R16 ( SOC, SOC, Op_RegI, 16, x16->as_VMReg() ); +++reg_def R16_H ( SOC, SOC, Op_RegI, 16, x16->as_VMReg()->next()); +++reg_def R17 ( SOC, SOC, Op_RegI, 17, x17->as_VMReg() ); +++reg_def R17_H ( SOC, SOC, Op_RegI, 17, x17->as_VMReg()->next()); +++reg_def R18 ( SOC, SOE, Op_RegI, 18, x18->as_VMReg() ); +++reg_def R18_H ( SOC, SOE, Op_RegI, 18, x18->as_VMReg()->next()); +++reg_def R19 ( SOC, SOE, Op_RegI, 19, x19->as_VMReg() ); +++reg_def R19_H ( SOC, SOE, Op_RegI, 19, x19->as_VMReg()->next()); +++reg_def R20 ( SOC, SOE, Op_RegI, 20, x20->as_VMReg() ); // caller esp +++reg_def R20_H ( SOC, SOE, Op_RegI, 20, x20->as_VMReg()->next()); +++reg_def R21 ( SOC, SOE, Op_RegI, 21, x21->as_VMReg() ); +++reg_def R21_H ( SOC, SOE, Op_RegI, 21, x21->as_VMReg()->next()); +++reg_def R22 ( SOC, SOE, Op_RegI, 22, x22->as_VMReg() ); +++reg_def R22_H ( SOC, SOE, Op_RegI, 22, x22->as_VMReg()->next()); +++reg_def R23 ( NS, SOE, Op_RegI, 23, x23->as_VMReg() ); // java thread +++reg_def R23_H ( NS, SOE, Op_RegI, 23, x23->as_VMReg()->next()); +++reg_def R24 ( SOC, SOE, Op_RegI, 24, x24->as_VMReg() ); +++reg_def R24_H ( SOC, SOE, Op_RegI, 24, x24->as_VMReg()->next()); +++reg_def R25 ( SOC, SOE, Op_RegI, 25, x25->as_VMReg() ); +++reg_def R25_H ( SOC, SOE, Op_RegI, 25, x25->as_VMReg()->next()); +++reg_def R26 ( SOC, SOE, Op_RegI, 26, x26->as_VMReg() ); +++reg_def R26_H ( SOC, SOE, Op_RegI, 26, x26->as_VMReg()->next()); +++reg_def R27 ( SOC, SOE, Op_RegI, 27, x27->as_VMReg() ); // heapbase +++reg_def R27_H ( SOC, SOE, Op_RegI, 27, x27->as_VMReg()->next()); +++reg_def 
R28 ( SOC, SOC, Op_RegI, 28, x28->as_VMReg() ); +++reg_def R28_H ( SOC, SOC, Op_RegI, 28, x28->as_VMReg()->next()); +++reg_def R29 ( SOC, SOC, Op_RegI, 29, x29->as_VMReg() ); +++reg_def R29_H ( SOC, SOC, Op_RegI, 29, x29->as_VMReg()->next()); +++reg_def R30 ( SOC, SOC, Op_RegI, 30, x30->as_VMReg() ); +++reg_def R30_H ( SOC, SOC, Op_RegI, 30, x30->as_VMReg()->next()); +++reg_def R31 ( SOC, SOC, Op_RegI, 31, x31->as_VMReg() ); +++reg_def R31_H ( SOC, SOC, Op_RegI, 31, x31->as_VMReg()->next()); +++ +++// ---------------------------- +++// Float/Double Registers +++// ---------------------------- +++ +++// Double Registers +++ +++// The rules of ADL require that double registers be defined in pairs. +++// Each pair must be two 32-bit values, but not necessarily a pair of +++// single float registers. In each pair, ADLC-assigned register numbers +++// must be adjacent, with the lower number even. Finally, when the +++// CPU stores such a register pair to memory, the word associated with +++// the lower ADLC-assigned number must be stored to the lower address. +++ +++// RISCV has 32 floating-point registers. Each can store a single +++// or double precision floating-point value. +++ +++// for Java use float registers f0-f31 are always save on call whereas +++// the platform ABI treats f8-f9 and f18-f27 as callee save). Other +++// float registers are SOC as per the platform spec +++ +++reg_def F0 ( SOC, SOC, Op_RegF, 0, f0->as_VMReg() ); +++reg_def F0_H ( SOC, SOC, Op_RegF, 0, f0->as_VMReg()->next() ); +++reg_def F1 ( SOC, SOC, Op_RegF, 1, f1->as_VMReg() ); +++reg_def F1_H ( SOC, SOC, Op_RegF, 1, f1->as_VMReg()->next() ); +++reg_def F2 ( SOC, SOC, Op_RegF, 2, f2->as_VMReg() ); +++reg_def F2_H ( SOC, SOC, Op_RegF, 2, f2->as_VMReg()->next() ); +++reg_def F3 ( SOC, SOC, Op_RegF, 3, f3->as_VMReg() ); +++reg_def F3_H ( SOC, SOC, Op_RegF, 3, f3->as_VMReg()->next() ); +++reg_def F4 ( SOC, SOC, Op_RegF, 4, f4->as_VMReg() ); +++reg_def F4_H ( SOC, SOC, Op_RegF, 4, f4->as_VMReg()->next() ); +++reg_def F5 ( SOC, SOC, Op_RegF, 5, f5->as_VMReg() ); +++reg_def F5_H ( SOC, SOC, Op_RegF, 5, f5->as_VMReg()->next() ); +++reg_def F6 ( SOC, SOC, Op_RegF, 6, f6->as_VMReg() ); +++reg_def F6_H ( SOC, SOC, Op_RegF, 6, f6->as_VMReg()->next() ); +++reg_def F7 ( SOC, SOC, Op_RegF, 7, f7->as_VMReg() ); +++reg_def F7_H ( SOC, SOC, Op_RegF, 7, f7->as_VMReg()->next() ); +++reg_def F8 ( SOC, SOE, Op_RegF, 8, f8->as_VMReg() ); +++reg_def F8_H ( SOC, SOE, Op_RegF, 8, f8->as_VMReg()->next() ); +++reg_def F9 ( SOC, SOE, Op_RegF, 9, f9->as_VMReg() ); +++reg_def F9_H ( SOC, SOE, Op_RegF, 9, f9->as_VMReg()->next() ); +++reg_def F10 ( SOC, SOC, Op_RegF, 10, f10->as_VMReg() ); +++reg_def F10_H ( SOC, SOC, Op_RegF, 10, f10->as_VMReg()->next() ); +++reg_def F11 ( SOC, SOC, Op_RegF, 11, f11->as_VMReg() ); +++reg_def F11_H ( SOC, SOC, Op_RegF, 11, f11->as_VMReg()->next() ); +++reg_def F12 ( SOC, SOC, Op_RegF, 12, f12->as_VMReg() ); +++reg_def F12_H ( SOC, SOC, Op_RegF, 12, f12->as_VMReg()->next() ); +++reg_def F13 ( SOC, SOC, Op_RegF, 13, f13->as_VMReg() ); +++reg_def F13_H ( SOC, SOC, Op_RegF, 13, f13->as_VMReg()->next() ); +++reg_def F14 ( SOC, SOC, Op_RegF, 14, f14->as_VMReg() ); +++reg_def F14_H ( SOC, SOC, Op_RegF, 14, f14->as_VMReg()->next() ); +++reg_def F15 ( SOC, SOC, Op_RegF, 15, f15->as_VMReg() ); +++reg_def F15_H ( SOC, SOC, Op_RegF, 15, f15->as_VMReg()->next() ); +++reg_def F16 ( SOC, SOC, Op_RegF, 16, f16->as_VMReg() ); +++reg_def F16_H ( SOC, SOC, Op_RegF, 16, f16->as_VMReg()->next() ); +++reg_def F17 ( SOC, SOC, Op_RegF, 
17, f17->as_VMReg() ); +++reg_def F17_H ( SOC, SOC, Op_RegF, 17, f17->as_VMReg()->next() ); +++reg_def F18 ( SOC, SOE, Op_RegF, 18, f18->as_VMReg() ); +++reg_def F18_H ( SOC, SOE, Op_RegF, 18, f18->as_VMReg()->next() ); +++reg_def F19 ( SOC, SOE, Op_RegF, 19, f19->as_VMReg() ); +++reg_def F19_H ( SOC, SOE, Op_RegF, 19, f19->as_VMReg()->next() ); +++reg_def F20 ( SOC, SOE, Op_RegF, 20, f20->as_VMReg() ); +++reg_def F20_H ( SOC, SOE, Op_RegF, 20, f20->as_VMReg()->next() ); +++reg_def F21 ( SOC, SOE, Op_RegF, 21, f21->as_VMReg() ); +++reg_def F21_H ( SOC, SOE, Op_RegF, 21, f21->as_VMReg()->next() ); +++reg_def F22 ( SOC, SOE, Op_RegF, 22, f22->as_VMReg() ); +++reg_def F22_H ( SOC, SOE, Op_RegF, 22, f22->as_VMReg()->next() ); +++reg_def F23 ( SOC, SOE, Op_RegF, 23, f23->as_VMReg() ); +++reg_def F23_H ( SOC, SOE, Op_RegF, 23, f23->as_VMReg()->next() ); +++reg_def F24 ( SOC, SOE, Op_RegF, 24, f24->as_VMReg() ); +++reg_def F24_H ( SOC, SOE, Op_RegF, 24, f24->as_VMReg()->next() ); +++reg_def F25 ( SOC, SOE, Op_RegF, 25, f25->as_VMReg() ); +++reg_def F25_H ( SOC, SOE, Op_RegF, 25, f25->as_VMReg()->next() ); +++reg_def F26 ( SOC, SOE, Op_RegF, 26, f26->as_VMReg() ); +++reg_def F26_H ( SOC, SOE, Op_RegF, 26, f26->as_VMReg()->next() ); +++reg_def F27 ( SOC, SOE, Op_RegF, 27, f27->as_VMReg() ); +++reg_def F27_H ( SOC, SOE, Op_RegF, 27, f27->as_VMReg()->next() ); +++reg_def F28 ( SOC, SOC, Op_RegF, 28, f28->as_VMReg() ); +++reg_def F28_H ( SOC, SOC, Op_RegF, 28, f28->as_VMReg()->next() ); +++reg_def F29 ( SOC, SOC, Op_RegF, 29, f29->as_VMReg() ); +++reg_def F29_H ( SOC, SOC, Op_RegF, 29, f29->as_VMReg()->next() ); +++reg_def F30 ( SOC, SOC, Op_RegF, 30, f30->as_VMReg() ); +++reg_def F30_H ( SOC, SOC, Op_RegF, 30, f30->as_VMReg()->next() ); +++reg_def F31 ( SOC, SOC, Op_RegF, 31, f31->as_VMReg() ); +++reg_def F31_H ( SOC, SOC, Op_RegF, 31, f31->as_VMReg()->next() ); +++ +++// ---------------------------- +++// Vector Registers +++// ---------------------------- +++ +++// For RVV vector registers, we simply extend vector register size to 4 +++// 'logical' slots. This is nominally 128 bits but it actually covers +++// all possible 'physical' RVV vector register lengths from 128 ~ 1024 +++// bits. The 'physical' RVV vector register length is detected during +++// startup, so the register allocator is able to identify the correct +++// number of bytes needed for an RVV spill/unspill. 
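The reg_def entries that follow name those four logical slots V<n>, V<n>_H, V<n>_J and V<n>_K; they are consecutive VMReg slots, which is what allows RegisterMap::pd_location to address slot k of a vector register as base + k. An illustrative sketch of the index arithmetic (vector_slot is a hypothetical helper mirroring the pd_location math, not code from the patch):

    #include <cstdio>

    int vector_slot(int n, int k) {
      const int max_fpr = 32 * 2 + 32 * 2;   // GPR and FPR slots precede the vectors
      const int slots_per_vreg = 4;          // VectorRegisterImpl::max_slots_per_register
      return max_fpr + n * slots_per_vreg + k;
    }

    int main() {
      printf("v0 occupies slots %d..%d\n", vector_slot(0, 0), vector_slot(0, 3));  // 128..131
      printf("v1 occupies slots %d..%d\n", vector_slot(1, 0), vector_slot(1, 3));  // 132..135
      return 0;
    }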
+++ +++reg_def V0 ( SOC, SOC, Op_VecA, 0, v0->as_VMReg() ); +++reg_def V0_H ( SOC, SOC, Op_VecA, 0, v0->as_VMReg()->next() ); +++reg_def V0_J ( SOC, SOC, Op_VecA, 0, v0->as_VMReg()->next(2) ); +++reg_def V0_K ( SOC, SOC, Op_VecA, 0, v0->as_VMReg()->next(3) ); +++ +++reg_def V1 ( SOC, SOC, Op_VecA, 1, v1->as_VMReg() ); +++reg_def V1_H ( SOC, SOC, Op_VecA, 1, v1->as_VMReg()->next() ); +++reg_def V1_J ( SOC, SOC, Op_VecA, 1, v1->as_VMReg()->next(2) ); +++reg_def V1_K ( SOC, SOC, Op_VecA, 1, v1->as_VMReg()->next(3) ); +++ +++reg_def V2 ( SOC, SOC, Op_VecA, 2, v2->as_VMReg() ); +++reg_def V2_H ( SOC, SOC, Op_VecA, 2, v2->as_VMReg()->next() ); +++reg_def V2_J ( SOC, SOC, Op_VecA, 2, v2->as_VMReg()->next(2) ); +++reg_def V2_K ( SOC, SOC, Op_VecA, 2, v2->as_VMReg()->next(3) ); +++ +++reg_def V3 ( SOC, SOC, Op_VecA, 3, v3->as_VMReg() ); +++reg_def V3_H ( SOC, SOC, Op_VecA, 3, v3->as_VMReg()->next() ); +++reg_def V3_J ( SOC, SOC, Op_VecA, 3, v3->as_VMReg()->next(2) ); +++reg_def V3_K ( SOC, SOC, Op_VecA, 3, v3->as_VMReg()->next(3) ); +++ +++reg_def V4 ( SOC, SOC, Op_VecA, 4, v4->as_VMReg() ); +++reg_def V4_H ( SOC, SOC, Op_VecA, 4, v4->as_VMReg()->next() ); +++reg_def V4_J ( SOC, SOC, Op_VecA, 4, v4->as_VMReg()->next(2) ); +++reg_def V4_K ( SOC, SOC, Op_VecA, 4, v4->as_VMReg()->next(3) ); +++ +++reg_def V5 ( SOC, SOC, Op_VecA, 5, v5->as_VMReg() ); +++reg_def V5_H ( SOC, SOC, Op_VecA, 5, v5->as_VMReg()->next() ); +++reg_def V5_J ( SOC, SOC, Op_VecA, 5, v5->as_VMReg()->next(2) ); +++reg_def V5_K ( SOC, SOC, Op_VecA, 5, v5->as_VMReg()->next(3) ); +++ +++reg_def V6 ( SOC, SOC, Op_VecA, 6, v6->as_VMReg() ); +++reg_def V6_H ( SOC, SOC, Op_VecA, 6, v6->as_VMReg()->next() ); +++reg_def V6_J ( SOC, SOC, Op_VecA, 6, v6->as_VMReg()->next(2) ); +++reg_def V6_K ( SOC, SOC, Op_VecA, 6, v6->as_VMReg()->next(3) ); +++ +++reg_def V7 ( SOC, SOC, Op_VecA, 7, v7->as_VMReg() ); +++reg_def V7_H ( SOC, SOC, Op_VecA, 7, v7->as_VMReg()->next() ); +++reg_def V7_J ( SOC, SOC, Op_VecA, 7, v7->as_VMReg()->next(2) ); +++reg_def V7_K ( SOC, SOC, Op_VecA, 7, v7->as_VMReg()->next(3) ); +++ +++reg_def V8 ( SOC, SOC, Op_VecA, 8, v8->as_VMReg() ); +++reg_def V8_H ( SOC, SOC, Op_VecA, 8, v8->as_VMReg()->next() ); +++reg_def V8_J ( SOC, SOC, Op_VecA, 8, v8->as_VMReg()->next(2) ); +++reg_def V8_K ( SOC, SOC, Op_VecA, 8, v8->as_VMReg()->next(3) ); +++ +++reg_def V9 ( SOC, SOC, Op_VecA, 9, v9->as_VMReg() ); +++reg_def V9_H ( SOC, SOC, Op_VecA, 9, v9->as_VMReg()->next() ); +++reg_def V9_J ( SOC, SOC, Op_VecA, 9, v9->as_VMReg()->next(2) ); +++reg_def V9_K ( SOC, SOC, Op_VecA, 9, v9->as_VMReg()->next(3) ); +++ +++reg_def V10 ( SOC, SOC, Op_VecA, 10, v10->as_VMReg() ); +++reg_def V10_H ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next() ); +++reg_def V10_J ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(2) ); +++reg_def V10_K ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(3) ); +++ +++reg_def V11 ( SOC, SOC, Op_VecA, 11, v11->as_VMReg() ); +++reg_def V11_H ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next() ); +++reg_def V11_J ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(2) ); +++reg_def V11_K ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(3) ); +++ +++reg_def V12 ( SOC, SOC, Op_VecA, 12, v12->as_VMReg() ); +++reg_def V12_H ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next() ); +++reg_def V12_J ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(2) ); +++reg_def V12_K ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(3) ); +++ +++reg_def V13 ( SOC, SOC, Op_VecA, 13, v13->as_VMReg() ); +++reg_def V13_H ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next() ); 
+++reg_def V13_J ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(2) ); +++reg_def V13_K ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(3) ); +++ +++reg_def V14 ( SOC, SOC, Op_VecA, 14, v14->as_VMReg() ); +++reg_def V14_H ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next() ); +++reg_def V14_J ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(2) ); +++reg_def V14_K ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(3) ); +++ +++reg_def V15 ( SOC, SOC, Op_VecA, 15, v15->as_VMReg() ); +++reg_def V15_H ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next() ); +++reg_def V15_J ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(2) ); +++reg_def V15_K ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(3) ); +++ +++reg_def V16 ( SOC, SOC, Op_VecA, 16, v16->as_VMReg() ); +++reg_def V16_H ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next() ); +++reg_def V16_J ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(2) ); +++reg_def V16_K ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(3) ); +++ +++reg_def V17 ( SOC, SOC, Op_VecA, 17, v17->as_VMReg() ); +++reg_def V17_H ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next() ); +++reg_def V17_J ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(2) ); +++reg_def V17_K ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(3) ); +++ +++reg_def V18 ( SOC, SOC, Op_VecA, 18, v18->as_VMReg() ); +++reg_def V18_H ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next() ); +++reg_def V18_J ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(2) ); +++reg_def V18_K ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(3) ); +++ +++reg_def V19 ( SOC, SOC, Op_VecA, 19, v19->as_VMReg() ); +++reg_def V19_H ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next() ); +++reg_def V19_J ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(2) ); +++reg_def V19_K ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(3) ); +++ +++reg_def V20 ( SOC, SOC, Op_VecA, 20, v20->as_VMReg() ); +++reg_def V20_H ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next() ); +++reg_def V20_J ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(2) ); +++reg_def V20_K ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(3) ); +++ +++reg_def V21 ( SOC, SOC, Op_VecA, 21, v21->as_VMReg() ); +++reg_def V21_H ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next() ); +++reg_def V21_J ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(2) ); +++reg_def V21_K ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(3) ); +++ +++reg_def V22 ( SOC, SOC, Op_VecA, 22, v22->as_VMReg() ); +++reg_def V22_H ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next() ); +++reg_def V22_J ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(2) ); +++reg_def V22_K ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(3) ); +++ +++reg_def V23 ( SOC, SOC, Op_VecA, 23, v23->as_VMReg() ); +++reg_def V23_H ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next() ); +++reg_def V23_J ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(2) ); +++reg_def V23_K ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(3) ); +++ +++reg_def V24 ( SOC, SOC, Op_VecA, 24, v24->as_VMReg() ); +++reg_def V24_H ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next() ); +++reg_def V24_J ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(2) ); +++reg_def V24_K ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(3) ); +++ +++reg_def V25 ( SOC, SOC, Op_VecA, 25, v25->as_VMReg() ); +++reg_def V25_H ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next() ); +++reg_def V25_J ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(2) ); +++reg_def V25_K ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(3) ); +++ +++reg_def V26 ( SOC, SOC, Op_VecA, 26, v26->as_VMReg() ); +++reg_def V26_H ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next() ); +++reg_def V26_J ( 
SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(2) ); +++reg_def V26_K ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(3) ); +++ +++reg_def V27 ( SOC, SOC, Op_VecA, 27, v27->as_VMReg() ); +++reg_def V27_H ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next() ); +++reg_def V27_J ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(2) ); +++reg_def V27_K ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(3) ); +++ +++reg_def V28 ( SOC, SOC, Op_VecA, 28, v28->as_VMReg() ); +++reg_def V28_H ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next() ); +++reg_def V28_J ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(2) ); +++reg_def V28_K ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(3) ); +++ +++reg_def V29 ( SOC, SOC, Op_VecA, 29, v29->as_VMReg() ); +++reg_def V29_H ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next() ); +++reg_def V29_J ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(2) ); +++reg_def V29_K ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(3) ); +++ +++reg_def V30 ( SOC, SOC, Op_VecA, 30, v30->as_VMReg() ); +++reg_def V30_H ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next() ); +++reg_def V30_J ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(2) ); +++reg_def V30_K ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(3) ); +++ +++reg_def V31 ( SOC, SOC, Op_VecA, 31, v31->as_VMReg() ); +++reg_def V31_H ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next() ); +++reg_def V31_J ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(2) ); +++reg_def V31_K ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(3) ); +++ +++// ---------------------------- +++// Special Registers +++// ---------------------------- +++ +++// On riscv, the physical flag register is missing, so we use t1 instead, +++// to bridge the RegFlag semantics in share/opto +++ +++reg_def RFLAGS (SOC, SOC, Op_RegFlags, 6, x6->as_VMReg() ); +++ +++// Specify priority of register selection within phases of register +++// allocation. Highest priority is first. A useful heuristic is to +++// give registers a low priority when they are required by machine +++// instructions, like EAX and EDX on I486, and choose no-save registers +++// before save-on-call, & save-on-call before save-on-entry. Registers +++// which participate in fixed calling sequences should come last. +++// Registers which are used as pairs must fall on an even boundary. 
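The alloc_class chunks below list registers in exactly that priority order; elsewhere the port manipulates such groups as 32-bit masks via AbstractRegSet in register_riscv.hpp. A self-contained sketch of the range() bit arithmetic from that header, applied to the argument registers x10..x17 (reg_range is an illustrative stand-in for AbstractRegSet::range):

    #include <cstdint>
    #include <cstdio>

    uint32_t reg_range(int start, int end) {  // inclusive encodings in 0..31
      uint32_t bits = ~0u;
      bits <<= start;           // drop bits below 'start'; the set run now begins at 'start'
      bits <<= (31 - end);      // shift bits above 'end' off the top...
      bits >>= (31 - end);      // ...and bring the run back, leaving bits start..end
      return bits;
    }

    int main() {
      printf("x10..x17 -> 0x%08x\n", reg_range(10, 17));  // 0x0003fc00
      return 0;
    }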
+++ +++alloc_class chunk0( +++ // volatiles +++ R7, R7_H, +++ R28, R28_H, +++ R29, R29_H, +++ R30, R30_H, +++ R31, R31_H, +++ +++ // arg registers +++ R10, R10_H, +++ R11, R11_H, +++ R12, R12_H, +++ R13, R13_H, +++ R14, R14_H, +++ R15, R15_H, +++ R16, R16_H, +++ R17, R17_H, +++ +++ // non-volatiles +++ R9, R9_H, +++ R18, R18_H, +++ R19, R19_H, +++ R20, R20_H, +++ R21, R21_H, +++ R22, R22_H, +++ R24, R24_H, +++ R25, R25_H, +++ R26, R26_H, +++ +++ // non-allocatable registers +++ R23, R23_H, // java thread +++ R27, R27_H, // heapbase +++ R4, R4_H, // thread +++ R8, R8_H, // fp +++ R0, R0_H, // zero +++ R1, R1_H, // ra +++ R2, R2_H, // sp +++ R3, R3_H, // gp +++); +++ +++alloc_class chunk1( +++ +++ // no save +++ F0, F0_H, +++ F1, F1_H, +++ F2, F2_H, +++ F3, F3_H, +++ F4, F4_H, +++ F5, F5_H, +++ F6, F6_H, +++ F7, F7_H, +++ F28, F28_H, +++ F29, F29_H, +++ F30, F30_H, +++ F31, F31_H, +++ +++ // arg registers +++ F10, F10_H, +++ F11, F11_H, +++ F12, F12_H, +++ F13, F13_H, +++ F14, F14_H, +++ F15, F15_H, +++ F16, F16_H, +++ F17, F17_H, +++ +++ // non-volatiles +++ F8, F8_H, +++ F9, F9_H, +++ F18, F18_H, +++ F19, F19_H, +++ F20, F20_H, +++ F21, F21_H, +++ F22, F22_H, +++ F23, F23_H, +++ F24, F24_H, +++ F25, F25_H, +++ F26, F26_H, +++ F27, F27_H, +++); +++ +++alloc_class chunk2( +++ V0, V0_H, V0_J, V0_K, +++ V1, V1_H, V1_J, V1_K, +++ V2, V2_H, V2_J, V2_K, +++ V3, V3_H, V3_J, V3_K, +++ V4, V4_H, V4_J, V4_K, +++ V5, V5_H, V5_J, V5_K, +++ V6, V6_H, V6_J, V6_K, +++ V7, V7_H, V7_J, V7_K, +++ V8, V8_H, V8_J, V8_K, +++ V9, V9_H, V9_J, V9_K, +++ V10, V10_H, V10_J, V10_K, +++ V11, V11_H, V11_J, V11_K, +++ V12, V12_H, V12_J, V12_K, +++ V13, V13_H, V13_J, V13_K, +++ V14, V14_H, V14_J, V14_K, +++ V15, V15_H, V15_J, V15_K, +++ V16, V16_H, V16_J, V16_K, +++ V17, V17_H, V17_J, V17_K, +++ V18, V18_H, V18_J, V18_K, +++ V19, V19_H, V19_J, V19_K, +++ V20, V20_H, V20_J, V20_K, +++ V21, V21_H, V21_J, V21_K, +++ V22, V22_H, V22_J, V22_K, +++ V23, V23_H, V23_J, V23_K, +++ V24, V24_H, V24_J, V24_K, +++ V25, V25_H, V25_J, V25_K, +++ V26, V26_H, V26_J, V26_K, +++ V27, V27_H, V27_J, V27_K, +++ V28, V28_H, V28_J, V28_K, +++ V29, V29_H, V29_J, V29_K, +++ V30, V30_H, V30_J, V30_K, +++ V31, V31_H, V31_J, V31_K, +++); +++ +++alloc_class chunk3(RFLAGS); +++ +++//----------Architecture Description Register Classes-------------------------- +++// Several register classes are automatically defined based upon information in +++// this architecture description. 
+++// 1) reg_class inline_cache_reg ( /* as def'd in frame section */ )
+++// 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
+++//
+++
+++// Class for all 32 bit general purpose registers
+++reg_class all_reg32(
+++ R0,
+++ R1,
+++ R2,
+++ R3,
+++ R4,
+++ R7,
+++ R8,
+++ R9,
+++ R10,
+++ R11,
+++ R12,
+++ R13,
+++ R14,
+++ R15,
+++ R16,
+++ R17,
+++ R18,
+++ R19,
+++ R20,
+++ R21,
+++ R22,
+++ R23,
+++ R24,
+++ R25,
+++ R26,
+++ R27,
+++ R28,
+++ R29,
+++ R30,
+++ R31
+++);
+++
+++// Class for any 32 bit integer registers (excluding zr)
+++reg_class any_reg32 %{
+++ return _ANY_REG32_mask;
+++%}
+++
+++// Singleton class for R10 int register
+++reg_class int_r10_reg(R10);
+++
+++// Singleton class for R12 int register
+++reg_class int_r12_reg(R12);
+++
+++// Singleton class for R13 int register
+++reg_class int_r13_reg(R13);
+++
+++// Singleton class for R14 int register
+++reg_class int_r14_reg(R14);
+++
+++// Class for all long integer registers
+++reg_class all_reg(
+++ R0, R0_H,
+++ R1, R1_H,
+++ R2, R2_H,
+++ R3, R3_H,
+++ R4, R4_H,
+++ R7, R7_H,
+++ R8, R8_H,
+++ R9, R9_H,
+++ R10, R10_H,
+++ R11, R11_H,
+++ R12, R12_H,
+++ R13, R13_H,
+++ R14, R14_H,
+++ R15, R15_H,
+++ R16, R16_H,
+++ R17, R17_H,
+++ R18, R18_H,
+++ R19, R19_H,
+++ R20, R20_H,
+++ R21, R21_H,
+++ R22, R22_H,
+++ R23, R23_H,
+++ R24, R24_H,
+++ R25, R25_H,
+++ R26, R26_H,
+++ R27, R27_H,
+++ R28, R28_H,
+++ R29, R29_H,
+++ R30, R30_H,
+++ R31, R31_H
+++);
+++
+++// Class for all long integer registers (excluding zr)
+++reg_class any_reg %{
+++ return _ANY_REG_mask;
+++%}
+++
+++// Class for non-allocatable 32 bit registers
+++reg_class non_allocatable_reg32(
+++ R0,  // zr
+++ R1,  // ra
+++ R2,  // sp
+++ R3,  // gp
+++ R4,  // tp
+++ R23  // java thread
+++);
+++
+++// Class for non-allocatable 64 bit registers
+++reg_class non_allocatable_reg(
+++ R0, R0_H,   // zr
+++ R1, R1_H,   // ra
+++ R2, R2_H,   // sp
+++ R3, R3_H,   // gp
+++ R4, R4_H,   // tp
+++ R23, R23_H  // java thread
+++);
+++
+++reg_class no_special_reg32 %{
+++ return _NO_SPECIAL_REG32_mask;
+++%}
+++
+++reg_class no_special_reg %{
+++ return _NO_SPECIAL_REG_mask;
+++%}
+++
+++reg_class ptr_reg %{
+++ return _PTR_REG_mask;
+++%}
+++
+++reg_class no_special_ptr_reg %{
+++ return _NO_SPECIAL_PTR_REG_mask;
+++%}
+++
+++// Class for 64 bit register r10
+++reg_class r10_reg(
+++ R10, R10_H
+++);
+++
+++// Class for 64 bit register r11
+++reg_class r11_reg(
+++ R11, R11_H
+++);
+++
+++// Class for 64 bit register r12
+++reg_class r12_reg(
+++ R12, R12_H
+++);
+++
+++// Class for 64 bit register r13
+++reg_class r13_reg(
+++ R13, R13_H
+++);
+++
+++// Class for 64 bit register r14
+++reg_class r14_reg(
+++ R14, R14_H
+++);
+++
+++// Class for 64 bit register r15
+++reg_class r15_reg(
+++ R15, R15_H
+++);
+++
+++// Class for 64 bit register r16
+++reg_class r16_reg(
+++ R16, R16_H
+++);
+++
+++// Class for method register
+++reg_class method_reg(
+++ R31, R31_H
+++);
+++
+++// Class for heapbase register
+++reg_class heapbase_reg(
+++ R27, R27_H
+++);
+++
+++// Class for java thread register
+++reg_class java_thread_reg(
+++ R23, R23_H
+++);
+++
+++reg_class r28_reg(
+++ R28, R28_H
+++);
+++
+++reg_class r29_reg(
+++ R29, R29_H
+++);
+++
+++reg_class r30_reg(
+++ R30, R30_H
+++);
+++
+++// Class for zero register
+++reg_class zr_reg(
+++ R0, R0_H
+++);
+++
+++// Class for thread register
+++reg_class thread_reg(
+++ R4, R4_H
+++);
+++
+++// Class for frame pointer register
+++reg_class fp_reg(
+++ R8, R8_H
+++);
+++
+++// Class for link register
+++reg_class ra_reg(
+++ R1, R1_H
+++);
+++
+++// Class for long sp register
+++reg_class sp_reg(
+++ R2, R2_H
+++);
+++
+++// Class for all float registers
+++reg_class float_reg(
+++ F0,
+++ F1,
+++ F2,
+++ F3,
+++ F4,
+++ F5,
+++ F6,
+++ F7,
+++ F8,
+++ F9,
+++ F10,
+++ F11,
+++ F12,
+++ F13,
+++ F14,
+++ F15,
+++ F16,
+++ F17,
+++ F18,
+++ F19,
+++ F20,
+++ F21,
+++ F22,
+++ F23,
+++ F24,
+++ F25,
+++ F26,
+++ F27,
+++ F28,
+++ F29,
+++ F30,
+++ F31
+++);
+++
+++// Double precision float registers have virtual `high halves' that
+++// are needed by the allocator.
+++// Class for all double registers
+++reg_class double_reg(
+++ F0, F0_H,
+++ F1, F1_H,
+++ F2, F2_H,
+++ F3, F3_H,
+++ F4, F4_H,
+++ F5, F5_H,
+++ F6, F6_H,
+++ F7, F7_H,
+++ F8, F8_H,
+++ F9, F9_H,
+++ F10, F10_H,
+++ F11, F11_H,
+++ F12, F12_H,
+++ F13, F13_H,
+++ F14, F14_H,
+++ F15, F15_H,
+++ F16, F16_H,
+++ F17, F17_H,
+++ F18, F18_H,
+++ F19, F19_H,
+++ F20, F20_H,
+++ F21, F21_H,
+++ F22, F22_H,
+++ F23, F23_H,
+++ F24, F24_H,
+++ F25, F25_H,
+++ F26, F26_H,
+++ F27, F27_H,
+++ F28, F28_H,
+++ F29, F29_H,
+++ F30, F30_H,
+++ F31, F31_H
+++);
+++
+++// Class for all RVV vector registers
+++reg_class vectora_reg(
+++ V1, V1_H, V1_J, V1_K,
+++ V2, V2_H, V2_J, V2_K,
+++ V3, V3_H, V3_J, V3_K,
+++ V4, V4_H, V4_J, V4_K,
+++ V5, V5_H, V5_J, V5_K,
+++ V6, V6_H, V6_J, V6_K,
+++ V7, V7_H, V7_J, V7_K,
+++ V8, V8_H, V8_J, V8_K,
+++ V9, V9_H, V9_J, V9_K,
+++ V10, V10_H, V10_J, V10_K,
+++ V11, V11_H, V11_J, V11_K,
+++ V12, V12_H, V12_J, V12_K,
+++ V13, V13_H, V13_J, V13_K,
+++ V14, V14_H, V14_J, V14_K,
+++ V15, V15_H, V15_J, V15_K,
+++ V16, V16_H, V16_J, V16_K,
+++ V17, V17_H, V17_J, V17_K,
+++ V18, V18_H, V18_J, V18_K,
+++ V19, V19_H, V19_J, V19_K,
+++ V20, V20_H, V20_J, V20_K,
+++ V21, V21_H, V21_J, V21_K,
+++ V22, V22_H, V22_J, V22_K,
+++ V23, V23_H, V23_J, V23_K,
+++ V24, V24_H, V24_J, V24_K,
+++ V25, V25_H, V25_J, V25_K,
+++ V26, V26_H, V26_J, V26_K,
+++ V27, V27_H, V27_J, V27_K,
+++ V28, V28_H, V28_J, V28_K,
+++ V29, V29_H, V29_J, V29_K,
+++ V30, V30_H, V30_J, V30_K,
+++ V31, V31_H, V31_J, V31_K
+++);
+++
+++// Class for 64 bit register f0
+++reg_class f0_reg(
+++ F0, F0_H
+++);
+++
+++// Class for 64 bit register f1
+++reg_class f1_reg(
+++ F1, F1_H
+++);
+++
+++// Class for 64 bit register f2
+++reg_class f2_reg(
+++ F2, F2_H
+++);
+++
+++// Class for 64 bit register f3
+++reg_class f3_reg(
+++ F3, F3_H
+++);
+++
+++// class for vector register v1
+++reg_class v1_reg(
+++ V1, V1_H, V1_J, V1_K
+++);
+++
+++// class for vector register v2
+++reg_class v2_reg(
+++ V2, V2_H, V2_J, V2_K
+++);
+++
+++// class for vector register v3
+++reg_class v3_reg(
+++ V3, V3_H, V3_J, V3_K
+++);
+++
+++// class for vector register v4
+++reg_class v4_reg(
+++ V4, V4_H, V4_J, V4_K
+++);
+++
+++// class for vector register v5
+++reg_class v5_reg(
+++ V5, V5_H, V5_J, V5_K
+++);
+++
+++// class for condition codes
+++reg_class reg_flags(RFLAGS);
+++%}
+++
+++//----------DEFINITION BLOCK---------------------------------------------------
+++// Define name --> value mappings to inform the ADLC of an integer valued name
+++// Current support includes integer values in the range [0, 0x7FFFFFFF]
+++// Format:
+++//        int_def <name> ( <int_value>, <expression> );
+++// Generated Code in ad_<arch>.hpp
+++//        #define <name> (<expression>)
+++//        // value == <int_value>
+++// Generated code in ad_<arch>.cpp adlc_verification()
+++//        assert(<name> == <int_value>, "Expect (<expression>) to equal <int_value>");
+++//
+++
+++// we follow the ppc-aix port in using a simple cost model which ranks
+++// register operations as cheap, memory ops as more expensive and
+++// branches as most expensive. the first two have a low as well as a
+++// normal cost. huge cost appears to be a way of saying don't do
+++// something
+++
+++definitions %{
+++  // The default cost (of a register move instruction).
+++  int_def DEFAULT_COST         ( 100,            100);
+++  int_def ALU_COST             ( 100,  1 * DEFAULT_COST);  // unknown, const, arith, shift, slt,
+++                                                           // multi, auipc, nop, logical, move
+++  int_def LOAD_COST            ( 300,  3 * DEFAULT_COST);  // load, fpload
+++  int_def STORE_COST           ( 100,  1 * DEFAULT_COST);  // store, fpstore
+++  int_def XFER_COST            ( 300,  3 * DEFAULT_COST);  // mfc, mtc, fcvt, fmove, fcmp
+++  int_def BRANCH_COST          ( 200,  2 * DEFAULT_COST);  // branch, jmp, call
+++  int_def IMUL_COST            ( 1000, 10 * DEFAULT_COST); // imul
+++  int_def IDIVSI_COST          ( 3400, 34 * DEFAULT_COST); // idivsi
+++  int_def IDIVDI_COST          ( 6600, 66 * DEFAULT_COST); // idivdi
+++  int_def FMUL_SINGLE_COST     ( 500,  5 * DEFAULT_COST);  // fmul, fmadd
+++  int_def FMUL_DOUBLE_COST     ( 700,  7 * DEFAULT_COST);  // fmul, fmadd
+++  int_def FDIV_COST            ( 2000, 20 * DEFAULT_COST); // fdiv
+++  int_def FSQRT_COST           ( 2500, 25 * DEFAULT_COST); // fsqrt
+++  int_def VOLATILE_REF_COST    ( 1000, 10 * DEFAULT_COST);
+++%}
+++
+++
+++
+++//----------SOURCE BLOCK-------------------------------------------------------
+++// This is a block of C++ code which provides values, functions, and
+++// definitions necessary in the rest of the architecture description
+++
+++source_hpp %{
+++
+++#include "asm/macroAssembler.hpp"
+++#include "gc/shared/barrierSetAssembler.hpp"
+++#include "gc/shared/cardTable.hpp"
+++#include "gc/shared/cardTableBarrierSet.hpp"
+++#include "gc/shared/collectedHeap.hpp"
+++#include "opto/addnode.hpp"
+++#include "opto/convertnode.hpp"
+++#include "runtime/objectMonitor.hpp"
+++
+++extern RegMask _ANY_REG32_mask;
+++extern RegMask _ANY_REG_mask;
+++extern RegMask _PTR_REG_mask;
+++extern RegMask _NO_SPECIAL_REG32_mask;
+++extern RegMask _NO_SPECIAL_REG_mask;
+++extern RegMask _NO_SPECIAL_PTR_REG_mask;
+++
+++class CallStubImpl {
+++
+++ //--------------------------------------------------------------
+++ //---< Used for optimization in Compile::shorten_branches >---
+++ //--------------------------------------------------------------
+++
+++ public:
+++  // Size of call trampoline stub.
+++ static uint size_call_trampoline() { +++ return 0; // no call trampolines on this platform +++ } +++ +++ // number of relocations needed by a call trampoline stub +++ static uint reloc_call_trampoline() { +++ return 0; // no call trampolines on this platform +++ } +++}; +++ +++class HandlerImpl { +++ +++ public: +++ +++ static int emit_exception_handler(CodeBuffer &cbuf); +++ static int emit_deopt_handler(CodeBuffer& cbuf); +++ +++ static uint size_exception_handler() { +++ return MacroAssembler::far_branch_size(); +++ } +++ +++ static uint size_deopt_handler() { +++ // count auipc + far branch +++ return NativeInstruction::instruction_size + MacroAssembler::far_branch_size(); +++ } +++}; +++ +++class Node::PD { +++public: +++ enum NodeFlags { +++ _last_flag = Node::_last_flag +++ }; +++}; +++ +++bool is_CAS(int opcode, bool maybe_volatile); +++ +++// predicate controlling translation of CompareAndSwapX +++bool needs_acquiring_load_reserved(const Node *load); +++ +++// predicate controlling addressing modes +++bool size_fits_all_mem_uses(AddPNode* addp, int shift); +++%} +++ +++source %{ +++ +++// Derived RegMask with conditionally allocatable registers +++ +++RegMask _ANY_REG32_mask; +++RegMask _ANY_REG_mask; +++RegMask _PTR_REG_mask; +++RegMask _NO_SPECIAL_REG32_mask; +++RegMask _NO_SPECIAL_REG_mask; +++RegMask _NO_SPECIAL_PTR_REG_mask; +++ +++void reg_mask_init() { +++ +++ _ANY_REG32_mask = _ALL_REG32_mask; +++ _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(x0->as_VMReg())); +++ +++ _ANY_REG_mask = _ALL_REG_mask; +++ _ANY_REG_mask.SUBTRACT(_ZR_REG_mask); +++ +++ _PTR_REG_mask = _ALL_REG_mask; +++ _PTR_REG_mask.SUBTRACT(_ZR_REG_mask); +++ +++ _NO_SPECIAL_REG32_mask = _ALL_REG32_mask; +++ _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask); +++ +++ _NO_SPECIAL_REG_mask = _ALL_REG_mask; +++ _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask); +++ +++ _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask; +++ _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask); +++ +++ // x27 is not allocatable when compressed oops is on +++ if (UseCompressedOops) { +++ _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg())); +++ _NO_SPECIAL_REG_mask.SUBTRACT(_HEAPBASE_REG_mask); +++ _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_HEAPBASE_REG_mask); +++ } +++ +++ // x8 is not allocatable when PreserveFramePointer is on +++ if (PreserveFramePointer) { +++ _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg())); +++ _NO_SPECIAL_REG_mask.SUBTRACT(_FP_REG_mask); +++ _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_FP_REG_mask); +++ } +++} +++ +++void PhaseOutput::pd_perform_mach_node_analysis() { +++} +++ +++int MachNode::pd_alignment_required() const { +++ return 1; +++} +++ +++int MachNode::compute_padding(int current_offset) const { +++ return 0; +++} +++ +++// is_CAS(int opcode, bool maybe_volatile) +++// +++// return true if opcode is one of the possible CompareAndSwapX +++// values otherwise false. 
+++bool is_CAS(int opcode, bool maybe_volatile) +++{ +++ switch (opcode) { +++ // We handle these +++ case Op_CompareAndSwapI: +++ case Op_CompareAndSwapL: +++ case Op_CompareAndSwapP: +++ case Op_CompareAndSwapN: +++ case Op_ShenandoahCompareAndSwapP: +++ case Op_ShenandoahCompareAndSwapN: +++ case Op_CompareAndSwapB: +++ case Op_CompareAndSwapS: +++ case Op_GetAndSetI: +++ case Op_GetAndSetL: +++ case Op_GetAndSetP: +++ case Op_GetAndSetN: +++ case Op_GetAndAddI: +++ case Op_GetAndAddL: +++ return true; +++ case Op_CompareAndExchangeI: +++ case Op_CompareAndExchangeN: +++ case Op_CompareAndExchangeB: +++ case Op_CompareAndExchangeS: +++ case Op_CompareAndExchangeL: +++ case Op_CompareAndExchangeP: +++ case Op_WeakCompareAndSwapB: +++ case Op_WeakCompareAndSwapS: +++ case Op_WeakCompareAndSwapI: +++ case Op_WeakCompareAndSwapL: +++ case Op_WeakCompareAndSwapP: +++ case Op_WeakCompareAndSwapN: +++ case Op_ShenandoahWeakCompareAndSwapP: +++ case Op_ShenandoahWeakCompareAndSwapN: +++ case Op_ShenandoahCompareAndExchangeP: +++ case Op_ShenandoahCompareAndExchangeN: +++ return maybe_volatile; +++ default: +++ return false; +++ } +++} +++ +++// predicate controlling translation of CAS +++// +++// returns true if CAS needs to use an acquiring load otherwise false +++bool needs_acquiring_load_reserved(const Node *n) +++{ +++ assert(n != NULL && is_CAS(n->Opcode(), true), "expecting a compare and swap"); +++ +++ LoadStoreNode* ldst = n->as_LoadStore(); +++ if (n != NULL && is_CAS(n->Opcode(), false)) { +++ assert(ldst != NULL && ldst->trailing_membar() != NULL, "expected trailing membar"); +++ } else { +++ return ldst != NULL && ldst->trailing_membar() != NULL; +++ } +++ // so we can just return true here +++ return true; +++} +++#define __ _masm. +++ +++// advance declarations for helper functions to convert register +++// indices to register objects +++ +++// the ad file has to provide implementations of certain methods +++// expected by the generic code +++// +++// REQUIRED FUNCTIONALITY +++ +++//============================================================================= +++ +++// !!!!! Special hack to get all types of calls to specify the byte offset +++// from the start of the call to the point where the return address +++// will point. +++ +++int MachCallStaticJavaNode::ret_addr_offset() +++{ +++ // jal +++ return 1 * NativeInstruction::instruction_size; +++} +++ +++int MachCallDynamicJavaNode::ret_addr_offset() +++{ +++ return 7 * NativeInstruction::instruction_size; // movptr, jal +++} +++ +++int MachCallRuntimeNode::ret_addr_offset() { +++ // for generated stubs the call will be +++ // jal(addr) +++ // or with far branches +++ // jal(trampoline_stub) +++ // for real runtime callouts it will be 11 instructions +++ // see riscv_enc_java_to_runtime +++ // la(t1, retaddr) -> auipc + addi +++ // la(t0, RuntimeAddress(addr)) -> lui + addi + slli + addi + slli + addi +++ // addi(sp, sp, -2 * wordSize) -> addi +++ // sd(t1, Address(sp, wordSize)) -> sd +++ // jalr(t0) -> jalr +++ CodeBlob *cb = CodeCache::find_blob(_entry_point); +++ if (cb != NULL) { +++ return 1 * NativeInstruction::instruction_size; +++ } else { +++ return 11 * NativeInstruction::instruction_size; +++ } +++} +++ +++int MachCallNativeNode::ret_addr_offset() { +++ Unimplemented(); +++ return -1; +++} +++ +++// +++// Compute padding required for nodes which need alignment +++// +++ +++// With RVC a call instruction may get 2-byte aligned. 
+++// The address of the call instruction needs to be 4-byte aligned to +++// ensure that it does not span a cache line so that it can be patched. +++int CallStaticJavaDirectNode::compute_padding(int current_offset) const +++{ +++ // to make sure the address of jal 4-byte aligned. +++ return align_up(current_offset, alignment_required()) - current_offset; +++} +++ +++// With RVC a call instruction may get 2-byte aligned. +++// The address of the call instruction needs to be 4-byte aligned to +++// ensure that it does not span a cache line so that it can be patched. +++int CallDynamicJavaDirectNode::compute_padding(int current_offset) const +++{ +++ // skip the movptr in MacroAssembler::ic_call(): +++ // lui + addi + slli + addi + slli + addi +++ // Though movptr() has already 4-byte aligned with or without RVC, +++ // We need to prevent from further changes by explicitly calculating the size. +++ const int movptr_size = 6 * NativeInstruction::instruction_size; +++ current_offset += movptr_size; +++ // to make sure the address of jal 4-byte aligned. +++ return align_up(current_offset, alignment_required()) - current_offset; +++} +++ +++//============================================================================= +++ +++#ifndef PRODUCT +++void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const { +++ assert_cond(st != NULL); +++ st->print("BREAKPOINT"); +++} +++#endif +++ +++void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { +++ C2_MacroAssembler _masm(&cbuf); +++ __ ebreak(); +++} +++ +++uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const { +++ return MachNode::size(ra_); +++} +++ +++//============================================================================= +++ +++#ifndef PRODUCT +++ void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const { +++ st->print("nop \t# %d bytes pad for loops and calls", _count); +++ } +++#endif +++ +++ void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const { +++ C2_MacroAssembler _masm(&cbuf); +++ Assembler::CompressibleRegion cr(&_masm); // nops shall be 2-byte under RVC for alignment purposes. +++ for (int i = 0; i < _count; i++) { +++ __ nop(); +++ } +++ } +++ +++ uint MachNopNode::size(PhaseRegAlloc*) const { +++ return _count * (UseRVC ? 
NativeInstruction::compressed_instruction_size : NativeInstruction::instruction_size); +++ } +++ +++//============================================================================= +++const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty; +++ +++int ConstantTable::calculate_table_base_offset() const { +++ return 0; // absolute addressing, no offset +++} +++ +++bool MachConstantBaseNode::requires_postalloc_expand() const { return false; } +++void MachConstantBaseNode::postalloc_expand(GrowableArray *nodes, PhaseRegAlloc *ra_) { +++ ShouldNotReachHere(); +++} +++ +++void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const { +++ // Empty encoding +++} +++ +++uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const { +++ return 0; +++} +++ +++#ifndef PRODUCT +++void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const { +++ assert_cond(st != NULL); +++ st->print("-- \t// MachConstantBaseNode (empty encoding)"); +++} +++#endif +++ +++#ifndef PRODUCT +++void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const { +++ assert_cond(st != NULL && ra_ != NULL); +++ Compile* C = ra_->C; +++ +++ int framesize = C->output()->frame_slots() << LogBytesPerInt; +++ +++ if (C->output()->need_stack_bang(framesize)) { +++ st->print("# stack bang size=%d\n\t", framesize); +++ } +++ +++ st->print("sd fp, [sp, #%d]\n\t", - 2 * wordSize); +++ st->print("sd ra, [sp, #%d]\n\t", - wordSize); +++ if (PreserveFramePointer) { st->print("sub fp, sp, #%d\n\t", 2 * wordSize); } +++ st->print("sub sp, sp, #%d\n\t", framesize); +++ +++ if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) { +++ st->print("ld t0, [guard]\n\t"); +++ st->print("membar LoadLoad\n\t"); +++ st->print("ld t1, [xthread, #thread_disarmed_offset]\n\t"); +++ st->print("beq t0, t1, skip\n\t"); +++ st->print("jalr #nmethod_entry_barrier_stub\n\t"); +++ st->print("j skip\n\t"); +++ st->print("guard: int\n\t"); +++ st->print("skip:\n\t"); +++ } +++} +++#endif +++ +++void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { +++ assert_cond(ra_ != NULL); +++ Compile* C = ra_->C; +++ C2_MacroAssembler _masm(&cbuf); +++ +++ // n.b. frame size includes space for return pc and fp +++ const int framesize = C->output()->frame_size_in_bytes(); +++ +++ // insert a nop at the start of the prolog so we can patch in a +++ // branch if we need to invalidate the method later +++ { +++ Assembler::IncompressibleRegion ir(&_masm); // keep the nop as 4 bytes for patching. 
+++ MacroAssembler::assert_alignment(__ pc()); +++ __ nop(); // 4 bytes +++ } +++ +++ assert_cond(C != NULL); +++ +++ if (C->clinit_barrier_on_entry()) { +++ assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started"); +++ +++ Label L_skip_barrier; +++ +++ __ mov_metadata(t1, C->method()->holder()->constant_encoding()); +++ __ clinit_barrier(t1, t0, &L_skip_barrier); +++ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); +++ __ bind(L_skip_barrier); +++ } +++ +++ int bangsize = C->output()->bang_size_in_bytes(); +++ if (C->output()->need_stack_bang(bangsize)) { +++ __ generate_stack_overflow_check(bangsize); +++ } +++ +++ __ build_frame(framesize); +++ +++ if (C->stub_function() == NULL) { +++ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); +++ bs->nmethod_entry_barrier(&_masm); +++ } +++ +++ if (VerifyStackAtCalls) { +++ Unimplemented(); +++ } +++ +++ C->output()->set_frame_complete(cbuf.insts_size()); +++ +++ if (C->has_mach_constant_base_node()) { +++ // NOTE: We set the table base offset here because users might be +++ // emitted before MachConstantBaseNode. +++ ConstantTable& constant_table = C->output()->constant_table(); +++ constant_table.set_table_base_offset(constant_table.calculate_table_base_offset()); +++ } +++} +++ +++uint MachPrologNode::size(PhaseRegAlloc* ra_) const +++{ +++ assert_cond(ra_ != NULL); +++ return MachNode::size(ra_); // too many variables; just compute it +++ // the hard way +++} +++ +++int MachPrologNode::reloc() const +++{ +++ return 0; +++} +++ +++//============================================================================= +++ +++#ifndef PRODUCT +++void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const { +++ assert_cond(st != NULL && ra_ != NULL); +++ Compile* C = ra_->C; +++ assert_cond(C != NULL); +++ int framesize = C->output()->frame_size_in_bytes(); +++ +++ st->print("# pop frame %d\n\t", framesize); +++ +++ if (framesize == 0) { +++ st->print("ld ra, [sp,#%d]\n\t", (2 * wordSize)); +++ st->print("ld fp, [sp,#%d]\n\t", (3 * wordSize)); +++ st->print("add sp, sp, #%d\n\t", (2 * wordSize)); +++ } else { +++ st->print("add sp, sp, #%d\n\t", framesize); +++ st->print("ld ra, [sp,#%d]\n\t", - 2 * wordSize); +++ st->print("ld fp, [sp,#%d]\n\t", - wordSize); +++ } +++ +++ if (do_polling() && C->is_method_compilation()) { +++ st->print("# test polling word\n\t"); +++ st->print("ld t0, [xthread,#%d]\n\t", in_bytes(JavaThread::polling_word_offset())); +++ st->print("bgtu sp, t0, #slow_path"); +++ } +++} +++#endif +++ +++void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { +++ assert_cond(ra_ != NULL); +++ Compile* C = ra_->C; +++ C2_MacroAssembler _masm(&cbuf); +++ assert_cond(C != NULL); +++ int framesize = C->output()->frame_size_in_bytes(); +++ +++ __ remove_frame(framesize); +++ +++ if (StackReservedPages > 0 && C->has_reserved_stack_access()) { +++ __ reserved_stack_check(); +++ } +++ +++ if (do_polling() && C->is_method_compilation()) { +++ Label dummy_label; +++ Label* code_stub = &dummy_label; +++ if (!C->output()->in_scratch_emit_size()) { +++ code_stub = &C->output()->safepoint_poll_table()->add_safepoint(__ offset()); +++ } +++ __ relocate(relocInfo::poll_return_type); +++ __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */); +++ } +++} +++ +++uint MachEpilogNode::size(PhaseRegAlloc *ra_) const { +++ assert_cond(ra_ != NULL); +++ // Variable size. Determine dynamically. 
+++ return MachNode::size(ra_); +++} +++ +++int MachEpilogNode::reloc() const { +++ // Return number of relocatable values contained in this instruction. +++ return 1; // 1 for polling page. +++} +++const Pipeline * MachEpilogNode::pipeline() const { +++ return MachNode::pipeline_class(); +++} +++ +++//============================================================================= +++ +++// Figure out which register class each belongs in: rc_int, rc_float or +++// rc_stack. +++enum RC { rc_bad, rc_int, rc_float, rc_vector, rc_stack }; +++ +++static enum RC rc_class(OptoReg::Name reg) { +++ +++ if (reg == OptoReg::Bad) { +++ return rc_bad; +++ } +++ +++ // we have 30 int registers * 2 halves +++ // (t0 and t1 are omitted) +++ int slots_of_int_registers = RegisterImpl::max_slots_per_register * (RegisterImpl::number_of_registers - 2); +++ if (reg < slots_of_int_registers) { +++ return rc_int; +++ } +++ +++ // we have 32 float register * 2 halves +++ int slots_of_float_registers = FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers; +++ if (reg < slots_of_int_registers + slots_of_float_registers) { +++ return rc_float; +++ } +++ +++ // we have 32 vector register * 4 halves +++ int slots_of_vector_registers = VectorRegisterImpl::max_slots_per_register * VectorRegisterImpl::number_of_registers; +++ if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_vector_registers) { +++ return rc_vector; +++ } +++ +++ // Between vector regs & stack is the flags regs. +++ assert(OptoReg::is_stack(reg), "blow up if spilling flags"); +++ +++ return rc_stack; +++} +++ +++uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const { +++ assert_cond(ra_ != NULL); +++ Compile* C = ra_->C; +++ +++ // Get registers to move. +++ OptoReg::Name src_hi = ra_->get_reg_second(in(1)); +++ OptoReg::Name src_lo = ra_->get_reg_first(in(1)); +++ OptoReg::Name dst_hi = ra_->get_reg_second(this); +++ OptoReg::Name dst_lo = ra_->get_reg_first(this); +++ +++ enum RC src_hi_rc = rc_class(src_hi); +++ enum RC src_lo_rc = rc_class(src_lo); +++ enum RC dst_hi_rc = rc_class(dst_hi); +++ enum RC dst_lo_rc = rc_class(dst_lo); +++ +++ assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register"); +++ +++ if (src_hi != OptoReg::Bad) { +++ assert((src_lo & 1) == 0 && src_lo + 1 == src_hi && +++ (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi, +++ "expected aligned-adjacent pairs"); +++ } +++ +++ if (src_lo == dst_lo && src_hi == dst_hi) { +++ return 0; // Self copy, no move. 
+++ } +++ +++ bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi && +++ (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi; +++ int src_offset = ra_->reg2offset(src_lo); +++ int dst_offset = ra_->reg2offset(dst_lo); +++ +++ if (bottom_type()->isa_vect() != NULL) { +++ uint ireg = ideal_reg(); +++ if (ireg == Op_VecA && cbuf) { +++ C2_MacroAssembler _masm(cbuf); +++ int vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); +++ if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) { +++ // stack to stack +++ __ spill_copy_vector_stack_to_stack(src_offset, dst_offset, +++ vector_reg_size_in_bytes); +++ } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) { +++ // vpr to stack +++ __ spill(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo)); +++ } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) { +++ // stack to vpr +++ __ unspill(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo)); +++ } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) { +++ // vpr to vpr +++ __ vmv1r_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo])); +++ } else { +++ ShouldNotReachHere(); +++ } +++ } +++ } else if (cbuf != NULL) { +++ C2_MacroAssembler _masm(cbuf); +++ switch (src_lo_rc) { +++ case rc_int: +++ if (dst_lo_rc == rc_int) { // gpr --> gpr copy +++ if (!is64 && this->ideal_reg() != Op_RegI) { // zero extended for narrow oop or klass +++ __ zero_extend(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]), 32); +++ } else { +++ __ mv(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo])); +++ } +++ } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy +++ if (is64) { +++ __ fmv_d_x(as_FloatRegister(Matcher::_regEncode[dst_lo]), +++ as_Register(Matcher::_regEncode[src_lo])); +++ } else { +++ __ fmv_w_x(as_FloatRegister(Matcher::_regEncode[dst_lo]), +++ as_Register(Matcher::_regEncode[src_lo])); +++ } +++ } else { // gpr --> stack spill +++ assert(dst_lo_rc == rc_stack, "spill to bad register class"); +++ __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset); +++ } +++ break; +++ case rc_float: +++ if (dst_lo_rc == rc_int) { // fpr --> gpr copy +++ if (is64) { +++ __ fmv_x_d(as_Register(Matcher::_regEncode[dst_lo]), +++ as_FloatRegister(Matcher::_regEncode[src_lo])); +++ } else { +++ __ fmv_x_w(as_Register(Matcher::_regEncode[dst_lo]), +++ as_FloatRegister(Matcher::_regEncode[src_lo])); +++ } +++ } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy +++ if (is64) { +++ __ fmv_d(as_FloatRegister(Matcher::_regEncode[dst_lo]), +++ as_FloatRegister(Matcher::_regEncode[src_lo])); +++ } else { +++ __ fmv_s(as_FloatRegister(Matcher::_regEncode[dst_lo]), +++ as_FloatRegister(Matcher::_regEncode[src_lo])); +++ } +++ } else { // fpr --> stack spill +++ assert(dst_lo_rc == rc_stack, "spill to bad register class"); +++ __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]), +++ is64, dst_offset); +++ } +++ break; +++ case rc_stack: +++ if (dst_lo_rc == rc_int) { // stack --> gpr load +++ if (this->ideal_reg() == Op_RegI) { +++ __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset); +++ } else { // // zero extended for narrow oop or klass +++ __ unspillu(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset); +++ } +++ } else if (dst_lo_rc == rc_float) { // stack --> fpr load +++ __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]), +++ is64, src_offset); +++ } else { // stack --> stack copy +++ 
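+++        // A stack-to-stack move has no direct encoding here, so the value
+++        // bounces through the scratch register t0: unspill (load) it from
+++        // the source slot, then spill (store) it to the destination slot.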
assert(dst_lo_rc == rc_stack, "spill to bad register class"); +++ if (this->ideal_reg() == Op_RegI) { +++ __ unspill(t0, is64, src_offset); +++ } else { // zero extended for narrow oop or klass +++ __ unspillu(t0, is64, src_offset); +++ } +++ __ spill(t0, is64, dst_offset); +++ } +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ } +++ +++ if (st != NULL) { +++ st->print("spill "); +++ if (src_lo_rc == rc_stack) { +++ st->print("[sp, #%d] -> ", src_offset); +++ } else { +++ st->print("%s -> ", Matcher::regName[src_lo]); +++ } +++ if (dst_lo_rc == rc_stack) { +++ st->print("[sp, #%d]", dst_offset); +++ } else { +++ st->print("%s", Matcher::regName[dst_lo]); +++ } +++ if (bottom_type()->isa_vect() != NULL) { +++ int vsize = 0; +++ if (ideal_reg() == Op_VecA) { +++ vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8; +++ } else { +++ ShouldNotReachHere(); +++ } +++ st->print("\t# vector spill size = %d", vsize); +++ } else { +++ st->print("\t# spill size = %d", is64 ? 64 : 32); +++ } +++ } +++ +++ return 0; +++} +++ +++#ifndef PRODUCT +++void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const { +++ if (ra_ == NULL) { +++ st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx); +++ } else { +++ implementation(NULL, ra_, false, st); +++ } +++} +++#endif +++ +++void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { +++ implementation(&cbuf, ra_, false, NULL); +++} +++ +++uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const { +++ return MachNode::size(ra_); +++} +++ +++//============================================================================= +++ +++#ifndef PRODUCT +++void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const { +++ assert_cond(ra_ != NULL && st != NULL); +++ int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()); +++ int reg = ra_->get_reg_first(this); +++ st->print("add %s, sp, #%d\t# box lock", +++ Matcher::regName[reg], offset); +++} +++#endif +++ +++void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { +++ C2_MacroAssembler _masm(&cbuf); +++ Assembler::IncompressibleRegion ir(&_masm); // Fixed length: see BoxLockNode::size() +++ +++ assert_cond(ra_ != NULL); +++ int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()); +++ int reg = ra_->get_encode(this); +++ +++ if (Assembler::is_simm12(offset)) { +++ __ addi(as_Register(reg), sp, offset); +++ } else { +++ __ li32(t0, offset); +++ __ add(as_Register(reg), sp, t0); +++ } +++} +++ +++uint BoxLockNode::size(PhaseRegAlloc *ra_) const { +++ // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_). 
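+++  // The size computed here must match the code BoxLockNode::emit() produces:
+++  // a single addi when the offset fits in a signed 12-bit immediate,
+++  // otherwise li32 (lui + addiw) followed by an add.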
+++ int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()); +++ +++ if (Assembler::is_simm12(offset)) { +++ return NativeInstruction::instruction_size; +++ } else { +++ return 3 * NativeInstruction::instruction_size; // lui + addiw + add; +++ } +++} +++ +++//============================================================================= +++ +++#ifndef PRODUCT +++void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const +++{ +++ assert_cond(st != NULL); +++ st->print_cr("# MachUEPNode"); +++ if (UseCompressedClassPointers) { +++ st->print_cr("\tlwu t0, [j_rarg0, oopDesc::klass_offset_in_bytes()]\t# compressed klass"); +++ if (CompressedKlassPointers::shift() != 0) { +++ st->print_cr("\tdecode_klass_not_null t0, t0"); +++ } +++ } else { +++ st->print_cr("\tld t0, [j_rarg0, oopDesc::klass_offset_in_bytes()]\t# compressed klass"); +++ } +++ st->print_cr("\tbeq t0, t1, ic_hit"); +++ st->print_cr("\tj, SharedRuntime::_ic_miss_stub\t # Inline cache check"); +++ st->print_cr("\tic_hit:"); +++} +++#endif +++ +++void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const +++{ +++ // This is the unverified entry point. +++ C2_MacroAssembler _masm(&cbuf); +++ +++ Label skip; +++ __ cmp_klass(j_rarg0, t1, t0, t2 /* call-clobbered t2 as a tmp */, skip); +++ __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); +++ __ bind(skip); +++ +++ // These NOPs are critical so that verified entry point is properly +++ // 4 bytes aligned for patching by NativeJump::patch_verified_entry() +++ __ align(NativeInstruction::instruction_size); +++} +++ +++uint MachUEPNode::size(PhaseRegAlloc* ra_) const +++{ +++ assert_cond(ra_ != NULL); +++ return MachNode::size(ra_); +++} +++ +++// REQUIRED EMIT CODE +++ +++//============================================================================= +++ +++// Emit exception handler code. +++int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) +++{ +++ // la_patchable t0, #exception_blob_entry_point +++ // jr (offset)t0 +++ // or +++ // j #exception_blob_entry_point +++ // Note that the code buffer's insts_mark is always relative to insts. +++ // That's why we must use the macroassembler to generate a handler. +++ C2_MacroAssembler _masm(&cbuf); +++ address base = __ start_a_stub(size_exception_handler()); +++ if (base == NULL) { +++ ciEnv::current()->record_failure("CodeCache is full"); +++ return 0; // CodeBuffer::expand failed +++ } +++ int offset = __ offset(); +++ __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point())); +++ assert(__ offset() - offset <= (int) size_exception_handler(), "overflow"); +++ __ end_a_stub(); +++ return offset; +++} +++ +++// Emit deopt handler code. +++int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) +++{ +++ // Note that the code buffer's insts_mark is always relative to insts. +++ // That's why we must use the macroassembler to generate a handler. 
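+++  // The handler materializes its own pc in ra (auipc ra, 0) so that the
+++  // deoptimization blob can recover the return address, then takes a far
+++  // jump to the unpack entry; this is the "auipc + far branch" pair counted
+++  // by size_deopt_handler() above.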
+++ C2_MacroAssembler _masm(&cbuf); +++ address base = __ start_a_stub(size_deopt_handler()); +++ if (base == NULL) { +++ ciEnv::current()->record_failure("CodeCache is full"); +++ return 0; // CodeBuffer::expand failed +++ } +++ int offset = __ offset(); +++ +++ __ auipc(ra, 0); +++ __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); +++ +++ assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow"); +++ __ end_a_stub(); +++ return offset; +++ +++} +++// REQUIRED MATCHER CODE +++ +++//============================================================================= +++ +++const bool Matcher::match_rule_supported(int opcode) { +++ if (!has_match_rule(opcode)) { +++ return false; +++ } +++ +++ switch (opcode) { +++ case Op_CacheWB: // fall through +++ case Op_CacheWBPreSync: // fall through +++ case Op_CacheWBPostSync: +++ if (!VM_Version::supports_data_cache_line_flush()) { +++ return false; +++ } +++ break; +++ +++ case Op_StrCompressedCopy: // fall through +++ case Op_StrInflatedCopy: // fall through +++ case Op_HasNegatives: +++ return UseRVV; +++ +++ case Op_EncodeISOArray: +++ return UseRVV && SpecialEncodeISOArray; +++ +++ case Op_PopCountI: +++ case Op_PopCountL: +++ return UsePopCountInstruction; +++ +++ case Op_RotateRight: +++ case Op_RotateLeft: +++ case Op_CountLeadingZerosI: +++ case Op_CountLeadingZerosL: +++ case Op_CountTrailingZerosI: +++ case Op_CountTrailingZerosL: +++ return UseZbb; +++ } +++ +++ return true; // Per default match rules are supported. +++} +++ +++// Identify extra cases that we might want to provide match rules for vector nodes and +++// other intrinsics guarded with vector length (vlen) and element type (bt). +++const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { +++ if (!match_rule_supported(opcode) || !vector_size_supported(bt, vlen)) { +++ return false; +++ } +++ +++ return op_vec_supported(opcode); +++} +++ +++const RegMask* Matcher::predicate_reg_mask(void) { +++ return NULL; +++} +++ +++const TypeVect* Matcher::predicate_reg_type(const Type* elemTy, int length) { +++ return NULL; +++} +++ +++// Vector calling convention not yet implemented. +++const bool Matcher::supports_vector_calling_convention(void) { +++ return false; +++} +++ +++OptoRegPair Matcher::vector_return_value(uint ideal_reg) { +++ Unimplemented(); +++ return OptoRegPair(0, 0); +++} +++ +++// Is this branch offset short enough that a short branch can be used? +++// +++// NOTE: If the platform does not provide any short branch variants, then +++// this method should return false for offset 0. +++// |---label(L1)-----| +++// |-----------------| +++// |-----------------|----------eq: float------------------- +++// |-----------------| // far_cmpD_branch | cmpD_branch +++// |------- ---------| feq; | feq; +++// |-far_cmpD_branch-| beqz done; | bnez L; +++// |-----------------| j L; | +++// |-----------------| bind(done); | +++// |-----------------|-------------------------------------- +++// |-----------------| // so shortBrSize = br_size - 4; +++// |-----------------| // so offs = offset - shortBrSize + 4; +++// |---label(L2)-----| +++bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) { +++ // The passed offset is relative to address of the branch. +++ int shortBrSize = br_size - 4; +++ int offs = offset - shortBrSize + 4; +++ return (-4096 <= offs && offs < 4096); +++} +++ +++// Vector width in bytes. 
+++const int Matcher::vector_width_in_bytes(BasicType bt) { +++ if (UseRVV) { +++ // The MaxVectorSize should have been set by detecting RVV max vector register size when check UseRVV. +++ // MaxVectorSize == VM_Version::_initial_vector_length +++ return MaxVectorSize; +++ } +++ return 0; +++} +++ +++// Limits on vector size (number of elements) loaded into vector. +++const int Matcher::max_vector_size(const BasicType bt) { +++ return vector_width_in_bytes(bt) / type2aelembytes(bt); +++} +++const int Matcher::min_vector_size(const BasicType bt) { +++ return max_vector_size(bt); +++} +++ +++// Vector ideal reg. +++const uint Matcher::vector_ideal_reg(int len) { +++ assert(MaxVectorSize >= len, ""); +++ if (UseRVV) { +++ return Op_VecA; +++ } +++ +++ ShouldNotReachHere(); +++ return 0; +++} +++ +++const int Matcher::scalable_vector_reg_size(const BasicType bt) { +++ return Matcher::max_vector_size(bt); +++} +++ +++MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) { +++ ShouldNotReachHere(); // generic vector operands not supported +++ return NULL; +++} +++ +++bool Matcher::is_generic_reg2reg_move(MachNode* m) { +++ ShouldNotReachHere(); // generic vector operands not supported +++ return false; +++} +++ +++bool Matcher::is_generic_vector(MachOper* opnd) { +++ ShouldNotReachHere(); // generic vector operands not supported +++ return false; +++} +++ +++// Return whether or not this register is ever used as an argument. +++// This function is used on startup to build the trampoline stubs in +++// generateOptoStub. Registers not mentioned will be killed by the VM +++// call in the trampoline, and arguments in those registers not be +++// available to the callee. +++bool Matcher::can_be_java_arg(int reg) +++{ +++ return +++ reg == R10_num || reg == R10_H_num || +++ reg == R11_num || reg == R11_H_num || +++ reg == R12_num || reg == R12_H_num || +++ reg == R13_num || reg == R13_H_num || +++ reg == R14_num || reg == R14_H_num || +++ reg == R15_num || reg == R15_H_num || +++ reg == R16_num || reg == R16_H_num || +++ reg == R17_num || reg == R17_H_num || +++ reg == F10_num || reg == F10_H_num || +++ reg == F11_num || reg == F11_H_num || +++ reg == F12_num || reg == F12_H_num || +++ reg == F13_num || reg == F13_H_num || +++ reg == F14_num || reg == F14_H_num || +++ reg == F15_num || reg == F15_H_num || +++ reg == F16_num || reg == F16_H_num || +++ reg == F17_num || reg == F17_H_num; +++} +++ +++bool Matcher::is_spillable_arg(int reg) +++{ +++ return can_be_java_arg(reg); +++} +++ +++const int Matcher::float_pressure(int default_pressure_threshold) { +++ return default_pressure_threshold; +++} +++ +++bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) { +++ return false; +++} +++ +++RegMask Matcher::divI_proj_mask() { +++ ShouldNotReachHere(); +++ return RegMask(); +++} +++ +++// Register for MODI projection of divmodI. +++RegMask Matcher::modI_proj_mask() { +++ ShouldNotReachHere(); +++ return RegMask(); +++} +++ +++// Register for DIVL projection of divmodL. +++RegMask Matcher::divL_proj_mask() { +++ ShouldNotReachHere(); +++ return RegMask(); +++} +++ +++// Register for MODL projection of divmodL. 
+++RegMask Matcher::modL_proj_mask() { +++ ShouldNotReachHere(); +++ return RegMask(); +++} +++ +++const RegMask Matcher::method_handle_invoke_SP_save_mask() { +++ return FP_REG_mask(); +++} +++ +++bool size_fits_all_mem_uses(AddPNode* addp, int shift) { +++ assert_cond(addp != NULL); +++ for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { +++ Node* u = addp->fast_out(i); +++ if (u != NULL && u->is_Mem()) { +++ int opsize = u->as_Mem()->memory_size(); +++ assert(opsize > 0, "unexpected memory operand size"); +++ if (u->as_Mem()->memory_size() != (1 << shift)) { +++ return false; +++ } +++ } +++ } +++ return true; +++} +++ +++// Should the Matcher clone input 'm' of node 'n'? +++bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) { +++ assert_cond(m != NULL); +++ if (is_vshift_con_pattern(n, m)) { // ShiftV src (ShiftCntV con) +++ mstack.push(m, Visit); // m = ShiftCntV +++ return true; +++ } +++ return false; +++} +++ +++// Should the Matcher clone shifts on addressing modes, expecting them +++// to be subsumed into complex addressing expressions or compute them +++// into registers? +++bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) { +++ return clone_base_plus_offset_address(m, mstack, address_visited); +++} +++ +++%} +++ +++ +++ +++//----------ENCODING BLOCK----------------------------------------------------- +++// This block specifies the encoding classes used by the compiler to +++// output byte streams. Encoding classes are parameterized macros +++// used by Machine Instruction Nodes in order to generate the bit +++// encoding of the instruction. Operands specify their base encoding +++// interface with the interface keyword. There are currently +++// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, & +++// COND_INTER. REG_INTER causes an operand to generate a function +++// which returns its register number when queried. CONST_INTER causes +++// an operand to generate a function which returns the value of the +++// constant when queried. MEMORY_INTER causes an operand to generate +++// four functions which return the Base Register, the Index Register, +++// the Scale Value, and the Offset Value of the operand when queried. +++// COND_INTER causes an operand to generate six functions which return +++// the encoding code (ie - encoding bits for the instruction) +++// associated with each basic boolean condition for a conditional +++// instruction. +++// +++// Instructions specify two basic values for encoding. Again, a +++// function is available to check if the constant displacement is an +++// oop. They use the ins_encode keyword to specify their encoding +++// classes (which must be a sequence of enc_class names, and their +++// parameters, specified in the encoding block), and they use the +++// opcode keyword to specify, in order, their primary, secondary, and +++// tertiary opcode. Only the opcode sections which a particular +++// instruction needs for encoding need to be specified. 
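
For orientation before the block itself: an enc_class is a parameterized macro whose body is C++ executed at emission time against the current code buffer, with $name$$reg yielding an operand's register encoding. A minimal hypothetical encoding class in the same style as the real ones below (a sketch only, not part of the patch):

    // Hypothetical sketch: emit a register-to-register move.
    enc_class riscv_enc_example_mv(iRegI dst, iRegI src) %{
      C2_MacroAssembler _masm(&cbuf);
      __ mv(as_Register($dst$$reg), as_Register($src$$reg));
    %}
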
+++encode %{ +++ // BEGIN Non-volatile memory access +++ +++ enc_class riscv_enc_li_imm(iRegIorL dst, immIorL src) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ int64_t con = (int64_t)$src$$constant; +++ Register dst_reg = as_Register($dst$$reg); +++ __ mv(dst_reg, con); +++ %} +++ +++ enc_class riscv_enc_mov_p(iRegP dst, immP src) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Register dst_reg = as_Register($dst$$reg); +++ address con = (address)$src$$constant; +++ if (con == NULL || con == (address)1) { +++ ShouldNotReachHere(); +++ } else { +++ relocInfo::relocType rtype = $src->constant_reloc(); +++ if (rtype == relocInfo::oop_type) { +++ __ movoop(dst_reg, (jobject)con, /*immediate*/true); +++ } else if (rtype == relocInfo::metadata_type) { +++ __ mov_metadata(dst_reg, (Metadata*)con); +++ } else { +++ assert(rtype == relocInfo::none, "unexpected reloc type"); +++ __ mv(dst_reg, $src$$constant); +++ } +++ } +++ %} +++ +++ enc_class riscv_enc_mov_p1(iRegP dst) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Register dst_reg = as_Register($dst$$reg); +++ __ mv(dst_reg, 1); +++ %} +++ +++ enc_class riscv_enc_mov_byte_map_base(iRegP dst) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ __ load_byte_map_base($dst$$Register); +++ %} +++ +++ enc_class riscv_enc_mov_n(iRegN dst, immN src) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Register dst_reg = as_Register($dst$$reg); +++ address con = (address)$src$$constant; +++ if (con == NULL) { +++ ShouldNotReachHere(); +++ } else { +++ relocInfo::relocType rtype = $src->constant_reloc(); +++ assert(rtype == relocInfo::oop_type, "unexpected reloc type"); +++ __ set_narrow_oop(dst_reg, (jobject)con); +++ } +++ %} +++ +++ enc_class riscv_enc_mov_zero(iRegNorP dst) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Register dst_reg = as_Register($dst$$reg); +++ __ mv(dst_reg, zr); +++ %} +++ +++ enc_class riscv_enc_mov_nk(iRegN dst, immNKlass src) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Register dst_reg = as_Register($dst$$reg); +++ address con = (address)$src$$constant; +++ if (con == NULL) { +++ ShouldNotReachHere(); +++ } else { +++ relocInfo::relocType rtype = $src->constant_reloc(); +++ assert(rtype == relocInfo::metadata_type, "unexpected reloc type"); +++ __ set_narrow_klass(dst_reg, (Klass *)con); +++ } +++ %} +++ +++ enc_class riscv_enc_cmpxchgw(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register, +++ /*result as bool*/ true); +++ %} +++ +++ enc_class riscv_enc_cmpxchgn(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register, +++ /*result as bool*/ true); +++ %} +++ +++ enc_class riscv_enc_cmpxchg(iRegINoSp res, memory mem, iRegL oldval, iRegL newval) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register, +++ /*result as bool*/ true); +++ %} +++ +++ enc_class riscv_enc_cmpxchgw_acq(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32, +++ /*acquire*/ 
Assembler::aq, /*release*/ Assembler::rl, $res$$Register, +++ /*result as bool*/ true); +++ %} +++ +++ enc_class riscv_enc_cmpxchgn_acq(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register, +++ /*result as bool*/ true); +++ %} +++ +++ enc_class riscv_enc_cmpxchg_acq(iRegINoSp res, memory mem, iRegL oldval, iRegL newval) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register, +++ /*result as bool*/ true); +++ %} +++ +++ // compare and branch instruction encodings +++ +++ enc_class riscv_enc_j(label lbl) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Label* L = $lbl$$label; +++ __ j(*L); +++ %} +++ +++ enc_class riscv_enc_far_cmpULtGe_imm0_branch(cmpOpULtGe cmp, iRegIorL op1, label lbl) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Label* L = $lbl$$label; +++ switch ($cmp$$cmpcode) { +++ case(BoolTest::ge): +++ __ j(*L); +++ break; +++ case(BoolTest::lt): +++ break; +++ default: +++ Unimplemented(); +++ } +++ %} +++ +++ // call instruction encodings +++ +++ enc_class riscv_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result) %{ +++ Register sub_reg = as_Register($sub$$reg); +++ Register super_reg = as_Register($super$$reg); +++ Register temp_reg = as_Register($temp$$reg); +++ Register result_reg = as_Register($result$$reg); +++ Register cr_reg = t1; +++ +++ Label miss; +++ Label done; +++ C2_MacroAssembler _masm(&cbuf); +++ __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg, +++ NULL, &miss); +++ if ($primary) { +++ __ mv(result_reg, zr); +++ } else { +++ __ mv(cr_reg, zr); +++ __ j(done); +++ } +++ +++ __ bind(miss); +++ if (!$primary) { +++ __ mv(cr_reg, 1); +++ } +++ +++ __ bind(done); +++ %} +++ +++ enc_class riscv_enc_java_static_call(method meth) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Assembler::IncompressibleRegion ir(&_masm); // Fixed length: see ret_addr_offset +++ +++ address addr = (address)$meth$$method; +++ address call = NULL; +++ assert_cond(addr != NULL); +++ if (!_method) { +++ // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap. +++ call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf); +++ if (call == NULL) { +++ ciEnv::current()->record_failure("CodeCache is full"); +++ return; +++ } +++ } else { +++ int method_index = resolved_method_index(cbuf); +++ RelocationHolder rspec = _optimized_virtual ? 
opt_virtual_call_Relocation::spec(method_index) +++ : static_call_Relocation::spec(method_index); +++ call = __ trampoline_call(Address(addr, rspec), &cbuf); +++ if (call == NULL) { +++ ciEnv::current()->record_failure("CodeCache is full"); +++ return; +++ } +++ +++ // Emit stub for static call +++ address stub = CompiledStaticCall::emit_to_interp_stub(cbuf); +++ if (stub == NULL) { +++ ciEnv::current()->record_failure("CodeCache is full"); +++ return; +++ } +++ } +++ %} +++ +++ enc_class riscv_enc_java_dynamic_call(method meth) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Assembler::IncompressibleRegion ir(&_masm); // Fixed length: see ret_addr_offset +++ int method_index = resolved_method_index(cbuf); +++ address call = __ ic_call((address)$meth$$method, method_index); +++ if (call == NULL) { +++ ciEnv::current()->record_failure("CodeCache is full"); +++ return; +++ } +++ %} +++ +++ enc_class riscv_enc_call_epilog() %{ +++ C2_MacroAssembler _masm(&cbuf); +++ if (VerifyStackAtCalls) { +++ // Check that stack depth is unchanged: find majik cookie on stack +++ __ call_Unimplemented(); +++ } +++ %} +++ +++ enc_class riscv_enc_java_to_runtime(method meth) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Assembler::IncompressibleRegion ir(&_masm); // Fixed length: see ret_addr_offset +++ +++ // some calls to generated routines (arraycopy code) are scheduled +++ // by C2 as runtime calls. if so we can call them using a jr (they +++ // will be in a reachable segment) otherwise we have to use a jalr +++ // which loads the absolute address into a register. +++ address entry = (address)$meth$$method; +++ CodeBlob *cb = CodeCache::find_blob(entry); +++ if (cb != NULL) { +++ address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type)); +++ if (call == NULL) { +++ ciEnv::current()->record_failure("CodeCache is full"); +++ return; +++ } +++ } else { +++ Label retaddr; +++ __ la(t1, retaddr); +++ __ la(t0, RuntimeAddress(entry)); +++ // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc() +++ __ addi(sp, sp, -2 * wordSize); +++ __ sd(t1, Address(sp, wordSize)); +++ __ jalr(t0); +++ __ bind(retaddr); +++ __ addi(sp, sp, 2 * wordSize); +++ } +++ %} +++ +++ // using the cr register as the bool result: 0 for success; others failed. +++ enc_class riscv_enc_fast_lock(iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Register flag = t1; +++ Register oop = as_Register($object$$reg); +++ Register box = as_Register($box$$reg); +++ Register disp_hdr = as_Register($tmp1$$reg); +++ Register tmp = as_Register($tmp2$$reg); +++ Label cont; +++ Label object_has_monitor; +++ +++ assert_different_registers(oop, box, tmp, disp_hdr, t0); +++ +++ // Load markWord from object into displaced_header. +++ __ ld(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes())); +++ +++ if (DiagnoseSyncOnValueBasedClasses != 0) { +++ __ load_klass(flag, oop); +++ __ lwu(flag, Address(flag, Klass::access_flags_offset())); +++ __ test_bit(flag, flag, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS), tmp /* tmp */); +++ __ bnez(flag, cont, true /* is_far */); +++ } +++ +++ if (UseBiasedLocking && !UseOptoBiasInlining) { +++ __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont, NULL /* slow_case */, NULL, flag); +++ } +++ +++ // Check for existing monitor +++ __ test_bit(t0, disp_hdr, exact_log2(markWord::monitor_value)); +++ __ bnez(t0, object_has_monitor); +++ +++ // Set tmp to be (markWord of object | UNLOCK_VALUE). 
+++    __ ori(tmp, disp_hdr, markWord::unlocked_value);
+++
+++    // Initialize the box. (Must happen before we update the object mark!)
+++    __ sd(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+++
+++    // Compare object markWord with an unlocked value (tmp) and if
+++    // equal exchange the stack address of our box with object markWord.
+++    // On failure disp_hdr contains the possibly locked markWord.
+++    __ cmpxchg(/*memory address*/oop, /*expected value*/tmp, /*new value*/box, Assembler::int64, Assembler::aq,
+++               Assembler::rl, /*result*/disp_hdr);
+++    __ mv(flag, zr);
+++    __ beq(disp_hdr, tmp, cont); // prepare zero flag and goto cont if we won the cas
+++
+++    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+++
+++    // If the compare-and-exchange succeeded, then we found an unlocked
+++    // object, have now locked it, and will continue at label cont.
+++    // Otherwise we did not see an unlocked object, so try the fast recursive case.
+++
+++    // Check if the owner is self by comparing the value in the
+++    // markWord of object (disp_hdr) with the stack pointer.
+++    __ sub(disp_hdr, disp_hdr, sp);
+++    __ mv(tmp, (intptr_t) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
+++    // If (mark & lock_mask) == 0 and mark - sp < page_size, we are stack-locking and goto cont,
+++    // hence we can store 0 as the displaced header in the box, which indicates that it is a
+++    // recursive lock.
+++    __ andr(tmp/*==0?*/, disp_hdr, tmp);
+++    __ sd(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+++    __ mv(flag, tmp); // we can use the value of tmp as the result here
+++
+++    __ j(cont);
+++
+++    // Handle existing monitor.
+++    __ bind(object_has_monitor);
+++    // The object's monitor m is unlocked iff m->owner == NULL,
+++    // otherwise m->owner may contain a thread or a stack address.
+++    //
+++    // Try to CAS m->owner from NULL to current thread.
+++    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes() - markWord::monitor_value));
+++    __ cmpxchg(/*memory address*/tmp, /*expected value*/zr, /*new value*/xthread, Assembler::int64, Assembler::aq,
+++               Assembler::rl, /*result*/flag); // cas succeeds if flag == zr(expected)
+++
+++    // Store a non-null value into the box to avoid looking like a re-entrant
+++    // lock. The fast-path monitor unlock code checks for
+++    // markWord::monitor_value so use markWord::unused_mark which has the
+++    // relevant bit set, and also matches ObjectSynchronizer::slow_enter.
+++    __ mv(tmp, (address)markWord::unused_mark().value());
+++    __ sd(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+++
+++    __ beqz(flag, cont); // CAS success means locking succeeded
+++
+++    __ bne(flag, xthread, cont); // Check for recursive locking
+++
+++    // Recursive lock case
+++    __ mv(flag, zr);
+++    __ increment(Address(disp_hdr, ObjectMonitor::recursions_offset_in_bytes() - markWord::monitor_value), 1, t0, tmp);
+++
+++    __ bind(cont);
+++  %}
+++
+++  // using cr flag to indicate the fast_unlock result: 0 for success; others failed.
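+++  // (mirrors riscv_enc_fast_lock above: a zero flag tells the matching
+++  // instruct that the unlock completed here, while any non-zero value
+++  // sends the thread to the runtime slow path)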
+++ enc_class riscv_enc_fast_unlock(iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Register flag = t1; +++ Register oop = as_Register($object$$reg); +++ Register box = as_Register($box$$reg); +++ Register disp_hdr = as_Register($tmp1$$reg); +++ Register tmp = as_Register($tmp2$$reg); +++ Label cont; +++ Label object_has_monitor; +++ +++ assert_different_registers(oop, box, tmp, disp_hdr, flag); +++ +++ if (UseBiasedLocking && !UseOptoBiasInlining) { +++ __ biased_locking_exit(oop, tmp, cont, flag); +++ } +++ +++ // Find the lock address and load the displaced header from the stack. +++ __ ld(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes())); +++ +++ // If the displaced header is 0, we have a recursive unlock. +++ __ mv(flag, disp_hdr); +++ __ beqz(disp_hdr, cont); +++ +++ // Handle existing monitor. +++ __ ld(tmp, Address(oop, oopDesc::mark_offset_in_bytes())); +++ __ test_bit(t0, tmp, exact_log2(markWord::monitor_value)); +++ __ bnez(t0, object_has_monitor); +++ +++ // Check if it is still a light weight lock, this is true if we +++ // see the stack address of the basicLock in the markWord of the +++ // object. +++ +++ __ cmpxchg(/*memory address*/oop, /*expected value*/box, /*new value*/disp_hdr, Assembler::int64, Assembler::relaxed, +++ Assembler::rl, /*result*/tmp); +++ __ xorr(flag, box, tmp); // box == tmp if cas succeeds +++ __ j(cont); +++ +++ assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); +++ +++ // Handle existing monitor. +++ __ bind(object_has_monitor); +++ STATIC_ASSERT(markWord::monitor_value <= INT_MAX); +++ __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor +++ __ ld(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes())); +++ +++ Label notRecursive; +++ __ beqz(disp_hdr, notRecursive); // Will be 0 if not recursive. +++ +++ // Recursive lock +++ __ addi(disp_hdr, disp_hdr, -1); +++ __ sd(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes())); +++ __ mv(flag, zr); +++ __ j(cont); +++ +++ __ bind(notRecursive); +++ __ ld(flag, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes())); +++ __ ld(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes())); +++ __ orr(flag, flag, disp_hdr); // Will be 0 if both are 0. 
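+++    // A non-zero result means the monitor still has waiters queued on
+++    // EntryList or cxq, so the fast path must fail (flag != 0) and let
+++    // the runtime slow path hand the lock to a successor.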
+++ __ bnez(flag, cont); +++ // need a release store here +++ __ la(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes())); +++ __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); +++ __ sd(zr, Address(tmp)); // set unowned +++ +++ __ bind(cont); +++ %} +++ +++ // arithmetic encodings +++ +++ enc_class riscv_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Register dst_reg = as_Register($dst$$reg); +++ Register src1_reg = as_Register($src1$$reg); +++ Register src2_reg = as_Register($src2$$reg); +++ __ corrected_idivl(dst_reg, src1_reg, src2_reg, false); +++ %} +++ +++ enc_class riscv_enc_div(iRegI dst, iRegI src1, iRegI src2) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Register dst_reg = as_Register($dst$$reg); +++ Register src1_reg = as_Register($src1$$reg); +++ Register src2_reg = as_Register($src2$$reg); +++ __ corrected_idivq(dst_reg, src1_reg, src2_reg, false); +++ %} +++ +++ enc_class riscv_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Register dst_reg = as_Register($dst$$reg); +++ Register src1_reg = as_Register($src1$$reg); +++ Register src2_reg = as_Register($src2$$reg); +++ __ corrected_idivl(dst_reg, src1_reg, src2_reg, true); +++ %} +++ +++ enc_class riscv_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Register dst_reg = as_Register($dst$$reg); +++ Register src1_reg = as_Register($src1$$reg); +++ Register src2_reg = as_Register($src2$$reg); +++ __ corrected_idivq(dst_reg, src1_reg, src2_reg, true); +++ %} +++ +++ enc_class riscv_enc_tail_call(iRegP jump_target) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Register target_reg = as_Register($jump_target$$reg); +++ __ jr(target_reg); +++ %} +++ +++ enc_class riscv_enc_tail_jmp(iRegP jump_target) %{ +++ C2_MacroAssembler _masm(&cbuf); +++ Register target_reg = as_Register($jump_target$$reg); +++ // exception oop should be in x10 +++ // ret addr has been popped into ra +++ // callee expects it in x13 +++ __ mv(x13, ra); +++ __ jr(target_reg); +++ %} +++ +++ enc_class riscv_enc_rethrow() %{ +++ C2_MacroAssembler _masm(&cbuf); +++ __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub())); +++ %} +++ +++ enc_class riscv_enc_ret() %{ +++ C2_MacroAssembler _masm(&cbuf); +++ __ ret(); +++ %} +++ +++%} +++ +++//----------FRAME-------------------------------------------------------------- +++// Definition of frame structure and management information. +++// +++// S T A C K L A Y O U T Allocators stack-slot number +++// | (to get allocators register number +++// G Owned by | | v add OptoReg::stack0()) +++// r CALLER | | +++// o | +--------+ pad to even-align allocators stack-slot +++// w V | pad0 | numbers; owned by CALLER +++// t -----------+--------+----> Matcher::_in_arg_limit, unaligned +++// h ^ | in | 5 +++// | | args | 4 Holes in incoming args owned by SELF +++// | | | | 3 +++// | | +--------+ +++// V | | old out| Empty on Intel, window on Sparc +++// | old |preserve| Must be even aligned. +++// | SP-+--------+----> Matcher::_old_SP, even aligned +++// | | in | 3 area for Intel ret address +++// Owned by |preserve| Empty on Sparc. 
+++// SELF    +--------+
+++//    |    |  pad2  | 2 pad to align old SP
+++//    |    +--------+ 1
+++//    |    | locks  | 0
+++//    |    +--------+----> OptoReg::stack0(), even aligned
+++//    |    |  pad1  | 11 pad to align new SP
+++//    |    +--------+
+++//    |    |        | 10
+++//    |    | spills | 9 spills
+++//    V    |        | 8 (pad0 slot for callee)
+++//  -----------+--------+----> Matcher::_out_arg_limit, unaligned
+++//    ^    |  out   | 7
+++//    |    |  args  | 6 Holes in outgoing args owned by CALLEE
+++// Owned by +--------+
+++//  CALLEE  | new out| 6 Empty on Intel, window on Sparc
+++//          | new    |preserve| Must be even-aligned.
+++//          | SP-+--------+----> Matcher::_new_SP, even aligned
+++//          |    |        |
+++//
+++// Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
+++//         known from SELF's arguments and the Java calling convention.
+++//         Region 6-7 is determined per call site.
+++// Note 2: If the calling convention leaves holes in the incoming argument
+++//         area, those holes are owned by SELF. Holes in the outgoing area
+++//         are owned by the CALLEE. Holes should not be necessary in the
+++//         incoming area, as the Java calling convention is completely under
+++//         the control of the AD file. Doubles can be sorted and packed to
+++//         avoid holes. Holes in the outgoing arguments may be necessary for
+++//         varargs C calling conventions.
+++// Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
+++//         even aligned with pad0 as needed.
+++//         Region 6 is even aligned. Region 6-7 is NOT even aligned;
+++//         (the latter is true on Intel but is it false on RISCV?)
+++//         region 6-11 is even aligned; it may be padded out more so that
+++//         the region from SP to FP meets the minimum stack alignment.
+++// Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
+++//         alignment. Region 11, pad1, may be dynamically extended so that
+++//         SP meets the minimum alignment.
+++
+++frame %{
+++  // These three registers define part of the calling convention
+++  // between compiled code and the interpreter.
+++
+++  // Inline Cache Register or methodOop for I2C.
+++  inline_cache_reg(R31);
+++
+++  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
+++  cisc_spilling_operand_name(indOffset);
+++
+++  // Number of stack slots consumed by locking an object
+++  // generate Compile::sync_stack_slots
+++  // VMRegImpl::slots_per_word = wordSize / stack_slot_size = 8 / 4 = 2
+++  sync_stack_slots(1 * VMRegImpl::slots_per_word);
+++
+++  // Compiled code's Frame Pointer
+++  frame_pointer(R2);
+++
+++  // Interpreter stores its frame pointer in a register which is
+++  // stored to the stack by I2CAdaptors.
+++  // I2CAdaptors convert from interpreted java to compiled java.
+++  interpreter_frame_pointer(R8);
+++
+++  // Stack alignment requirement
+++  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
+++
+++  // Number of outgoing stack slots killed above the out_preserve_stack_slots
+++  // for calls to C. Supports the var-args backing area for register parms.
+++  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes / BytesPerInt);
+++
+++  // The after-PROLOG location of the return address. Location of
+++  // return address specifies a type (REG or STACK) and a number
+++  // representing the register number (i.e. - use a register name) or
+++  // stack slot.
+++  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
+++ // Otherwise, it is above the locks and verification slot and alignment word +++ // TODO this may well be correct but need to check why that - 2 is there +++ // ppc port uses 0 but we definitely need to allow for fixed_slots +++ // which folds in the space used for monitors +++ return_addr(STACK - 2 + +++ align_up((Compile::current()->in_preserve_stack_slots() + +++ Compile::current()->fixed_slots()), +++ stack_alignment_in_slots())); +++ +++ // Location of compiled Java return values. Same as C for now. +++ return_value +++ %{ +++ assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, +++ "only return normal values"); +++ +++ static const int lo[Op_RegL + 1] = { // enum name +++ 0, // Op_Node +++ 0, // Op_Set +++ R10_num, // Op_RegN +++ R10_num, // Op_RegI +++ R10_num, // Op_RegP +++ F10_num, // Op_RegF +++ F10_num, // Op_RegD +++ R10_num // Op_RegL +++ }; +++ +++ static const int hi[Op_RegL + 1] = { // enum name +++ 0, // Op_Node +++ 0, // Op_Set +++ OptoReg::Bad, // Op_RegN +++ OptoReg::Bad, // Op_RegI +++ R10_H_num, // Op_RegP +++ OptoReg::Bad, // Op_RegF +++ F10_H_num, // Op_RegD +++ R10_H_num // Op_RegL +++ }; +++ +++ return OptoRegPair(hi[ideal_reg], lo[ideal_reg]); +++ %} +++%} +++ +++//----------ATTRIBUTES--------------------------------------------------------- +++//----------Operand Attributes------------------------------------------------- +++op_attrib op_cost(1); // Required cost attribute +++ +++//----------Instruction Attributes--------------------------------------------- +++ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute +++ins_attrib ins_size(32); // Required size attribute (in bits) +++ins_attrib ins_short_branch(0); // Required flag: is this instruction +++ // a non-matching short branch variant +++ // of some long branch? +++ins_attrib ins_alignment(4); // Required alignment attribute (must +++ // be a power of 2) specifies the +++ // alignment that some part of the +++ // instruction (not necessarily the +++ // start) requires. If > 1, a +++ // compute_padding() function must be +++ // provided for the instruction +++ +++//----------OPERANDS----------------------------------------------------------- +++// Operand definitions must precede instruction definitions for correct parsing +++// in the ADLC because operands constitute user defined types which are used in +++// instruction definitions. 
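+++//
+++// As an illustrative sketch only (abridged, in the style of the integer ALU
+++// rules that appear later in this file): an instruct consumes these
+++// user-defined operand types roughly like so,
+++//
+++//   instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAdd src2) %{
+++//     match(Set dst (AddI src1 src2));
+++//     format %{ "addiw $dst, $src1, $src2" %}
+++//     ins_encode %{
+++//       __ addiw(as_Register($dst$$reg), as_Register($src1$$reg),
+++//                $src2$$constant);
+++//     %}
+++//     ins_pipe(ialu_reg_imm);
+++//   %}
+++//
+++// where iRegINoSp, iRegIorL2I and immIAdd are all defined below; the ADLC
+++// can only resolve those names because this section is parsed first.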
+++ +++//----------Simple Operands---------------------------------------------------- +++ +++// Integer operands 32 bit +++// 32 bit immediate +++operand immI() +++%{ +++ match(ConI); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// 32 bit zero +++operand immI0() +++%{ +++ predicate(n->get_int() == 0); +++ match(ConI); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// 32 bit unit increment +++operand immI_1() +++%{ +++ predicate(n->get_int() == 1); +++ match(ConI); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// 32 bit unit decrement +++operand immI_M1() +++%{ +++ predicate(n->get_int() == -1); +++ match(ConI); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Unsigned Integer Immediate: 6-bit int, greater than 32 +++operand uimmI6_ge32() %{ +++ predicate(((unsigned int)(n->get_int()) < 64) && (n->get_int() >= 32)); +++ match(ConI); +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++operand immI_le_4() +++%{ +++ predicate(n->get_int() <= 4); +++ match(ConI); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++operand immI_16() +++%{ +++ predicate(n->get_int() == 16); +++ match(ConI); +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++operand immI_24() +++%{ +++ predicate(n->get_int() == 24); +++ match(ConI); +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++operand immI_31() +++%{ +++ predicate(n->get_int() == 31); +++ match(ConI); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++operand immI_63() +++%{ +++ predicate(n->get_int() == 63); +++ match(ConI); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// 32 bit integer valid for add immediate +++operand immIAdd() +++%{ +++ predicate(Assembler::is_simm12((int64_t)n->get_int())); +++ match(ConI); +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// 32 bit integer valid for sub immediate +++operand immISub() +++%{ +++ predicate(Assembler::is_simm12(-(int64_t)n->get_int())); +++ match(ConI); +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// 5 bit signed value. 
+++operand immI5() +++%{ +++ predicate(n->get_int() <= 15 && n->get_int() >= -16); +++ match(ConI); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// 5 bit signed value (simm5) +++operand immL5() +++%{ +++ predicate(n->get_long() <= 15 && n->get_long() >= -16); +++ match(ConL); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Integer operands 64 bit +++// 64 bit immediate +++operand immL() +++%{ +++ match(ConL); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// 64 bit zero +++operand immL0() +++%{ +++ predicate(n->get_long() == 0); +++ match(ConL); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Pointer operands +++// Pointer Immediate +++operand immP() +++%{ +++ match(ConP); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// NULL Pointer Immediate +++operand immP0() +++%{ +++ predicate(n->get_ptr() == 0); +++ match(ConP); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Pointer Immediate One +++// this is used in object initialization (initial object header) +++operand immP_1() +++%{ +++ predicate(n->get_ptr() == 1); +++ match(ConP); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Card Table Byte Map Base +++operand immByteMapBase() +++%{ +++ // Get base of card map +++ predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) && +++ (CardTable::CardValue*)n->get_ptr() == +++ ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base()); +++ match(ConP); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Int Immediate: low 16-bit mask +++operand immI_16bits() +++%{ +++ predicate(n->get_int() == 0xFFFF); +++ match(ConI); +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++operand immIpowerOf2() %{ +++ predicate(is_power_of_2((juint)(n->get_int()))); +++ match(ConI); +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Long Immediate: low 32-bit mask +++operand immL_32bits() +++%{ +++ predicate(n->get_long() == 0xFFFFFFFFL); +++ match(ConL); +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// 64 bit unit decrement +++operand immL_M1() +++%{ +++ predicate(n->get_long() == -1); +++ match(ConL); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++ +++// 32 bit offset of pc in thread anchor +++ +++operand immL_pc_off() +++%{ +++ predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) + +++ in_bytes(JavaFrameAnchor::last_Java_pc_offset())); +++ match(ConL); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// 64 bit integer valid for add immediate +++operand immLAdd() +++%{ +++ predicate(Assembler::is_simm12(n->get_long())); +++ match(ConL); +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// 64 bit integer valid for sub immediate +++operand immLSub() +++%{ +++ predicate(Assembler::is_simm12(-(n->get_long()))); +++ match(ConL); +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Narrow pointer operands +++// Narrow Pointer Immediate +++operand immN() +++%{ +++ match(ConN); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Narrow NULL Pointer Immediate +++operand immN0() +++%{ +++ predicate(n->get_narrowcon() == 0); +++ match(ConN); +++ +++ op_cost(0); +++ format %{ %} +++ 
interface(CONST_INTER); +++%} +++ +++operand immNKlass() +++%{ +++ match(ConNKlass); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Float and Double operands +++// Double Immediate +++operand immD() +++%{ +++ match(ConD); +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Double Immediate: +0.0d +++operand immD0() +++%{ +++ predicate(jlong_cast(n->getd()) == 0); +++ match(ConD); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Float Immediate +++operand immF() +++%{ +++ match(ConF); +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Float Immediate: +0.0f. +++operand immF0() +++%{ +++ predicate(jint_cast(n->getf()) == 0); +++ match(ConF); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++operand immIOffset() +++%{ +++ predicate(Assembler::is_simm12(n->get_int())); +++ match(ConI); +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++operand immLOffset() +++%{ +++ predicate(Assembler::is_simm12(n->get_long())); +++ match(ConL); +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Scale values +++operand immIScale() +++%{ +++ predicate(1 <= n->get_int() && (n->get_int() <= 3)); +++ match(ConI); +++ +++ op_cost(0); +++ format %{ %} +++ interface(CONST_INTER); +++%} +++ +++// Integer 32 bit Register Operands +++operand iRegI() +++%{ +++ constraint(ALLOC_IN_RC(any_reg32)); +++ match(RegI); +++ match(iRegINoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Integer 32 bit Register not Special +++operand iRegINoSp() +++%{ +++ constraint(ALLOC_IN_RC(no_special_reg32)); +++ match(RegI); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Register R10 only +++operand iRegI_R10() +++%{ +++ constraint(ALLOC_IN_RC(int_r10_reg)); +++ match(RegI); +++ match(iRegINoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Register R12 only +++operand iRegI_R12() +++%{ +++ constraint(ALLOC_IN_RC(int_r12_reg)); +++ match(RegI); +++ match(iRegINoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Register R13 only +++operand iRegI_R13() +++%{ +++ constraint(ALLOC_IN_RC(int_r13_reg)); +++ match(RegI); +++ match(iRegINoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Register R14 only +++operand iRegI_R14() +++%{ +++ constraint(ALLOC_IN_RC(int_r14_reg)); +++ match(RegI); +++ match(iRegINoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Integer 64 bit Register Operands +++operand iRegL() +++%{ +++ constraint(ALLOC_IN_RC(any_reg)); +++ match(RegL); +++ match(iRegLNoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Integer 64 bit Register not Special +++operand iRegLNoSp() +++%{ +++ constraint(ALLOC_IN_RC(no_special_reg)); +++ match(RegL); +++ match(iRegL_R10); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Long 64 bit Register R28 only +++operand iRegL_R28() +++%{ +++ constraint(ALLOC_IN_RC(r28_reg)); +++ match(RegL); +++ match(iRegLNoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Long 64 bit Register R29 only +++operand iRegL_R29() +++%{ +++ constraint(ALLOC_IN_RC(r29_reg)); +++ match(RegL); +++ match(iRegLNoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Long 64 bit Register R30 only +++operand iRegL_R30() +++%{ +++ constraint(ALLOC_IN_RC(r30_reg)); +++ 
match(RegL); +++ match(iRegLNoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Pointer Register Operands +++// Pointer Register +++operand iRegP() +++%{ +++ constraint(ALLOC_IN_RC(ptr_reg)); +++ match(RegP); +++ match(iRegPNoSp); +++ match(iRegP_R10); +++ match(iRegP_R15); +++ match(javaThread_RegP); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Pointer 64 bit Register not Special +++operand iRegPNoSp() +++%{ +++ constraint(ALLOC_IN_RC(no_special_ptr_reg)); +++ match(RegP); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++operand iRegP_R10() +++%{ +++ constraint(ALLOC_IN_RC(r10_reg)); +++ match(RegP); +++ // match(iRegP); +++ match(iRegPNoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Pointer 64 bit Register R11 only +++operand iRegP_R11() +++%{ +++ constraint(ALLOC_IN_RC(r11_reg)); +++ match(RegP); +++ match(iRegPNoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++operand iRegP_R12() +++%{ +++ constraint(ALLOC_IN_RC(r12_reg)); +++ match(RegP); +++ // match(iRegP); +++ match(iRegPNoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Pointer 64 bit Register R13 only +++operand iRegP_R13() +++%{ +++ constraint(ALLOC_IN_RC(r13_reg)); +++ match(RegP); +++ match(iRegPNoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++operand iRegP_R14() +++%{ +++ constraint(ALLOC_IN_RC(r14_reg)); +++ match(RegP); +++ // match(iRegP); +++ match(iRegPNoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++operand iRegP_R15() +++%{ +++ constraint(ALLOC_IN_RC(r15_reg)); +++ match(RegP); +++ // match(iRegP); +++ match(iRegPNoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++operand iRegP_R16() +++%{ +++ constraint(ALLOC_IN_RC(r16_reg)); +++ match(RegP); +++ // match(iRegP); +++ match(iRegPNoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Pointer 64 bit Register R28 only +++operand iRegP_R28() +++%{ +++ constraint(ALLOC_IN_RC(r28_reg)); +++ match(RegP); +++ match(iRegPNoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Pointer Register Operands +++// Narrow Pointer Register +++operand iRegN() +++%{ +++ constraint(ALLOC_IN_RC(any_reg32)); +++ match(RegN); +++ match(iRegNNoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Integer 64 bit Register not Special +++operand iRegNNoSp() +++%{ +++ constraint(ALLOC_IN_RC(no_special_reg32)); +++ match(RegN); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// heap base register -- used for encoding immN0 +++operand iRegIHeapbase() +++%{ +++ constraint(ALLOC_IN_RC(heapbase_reg)); +++ match(RegI); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Long 64 bit Register R10 only +++operand iRegL_R10() +++%{ +++ constraint(ALLOC_IN_RC(r10_reg)); +++ match(RegL); +++ match(iRegLNoSp); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Float Register +++// Float register operands +++operand fRegF() +++%{ +++ constraint(ALLOC_IN_RC(float_reg)); +++ match(RegF); +++ +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Double Register +++// Double register operands +++operand fRegD() +++%{ +++ constraint(ALLOC_IN_RC(double_reg)); +++ match(RegD); +++ +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Generic vector class. 
This will be used for +++// all vector operands. +++operand vReg() +++%{ +++ constraint(ALLOC_IN_RC(vectora_reg)); +++ match(VecA); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++operand vReg_V1() +++%{ +++ constraint(ALLOC_IN_RC(v1_reg)); +++ match(VecA); +++ match(vReg); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++operand vReg_V2() +++%{ +++ constraint(ALLOC_IN_RC(v2_reg)); +++ match(VecA); +++ match(vReg); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++operand vReg_V3() +++%{ +++ constraint(ALLOC_IN_RC(v3_reg)); +++ match(VecA); +++ match(vReg); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++operand vReg_V4() +++%{ +++ constraint(ALLOC_IN_RC(v4_reg)); +++ match(VecA); +++ match(vReg); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++operand vReg_V5() +++%{ +++ constraint(ALLOC_IN_RC(v5_reg)); +++ match(VecA); +++ match(vReg); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++// Java Thread Register +++operand javaThread_RegP(iRegP reg) +++%{ +++ constraint(ALLOC_IN_RC(java_thread_reg)); // java_thread_reg +++ match(reg); +++ op_cost(0); +++ format %{ %} +++ interface(REG_INTER); +++%} +++ +++//----------Memory Operands---------------------------------------------------- +++// RISCV has only base_plus_offset and literal address mode, so no need to use +++// index and scale. Here set index as 0xffffffff and scale as 0x0. +++operand indirect(iRegP reg) +++%{ +++ constraint(ALLOC_IN_RC(ptr_reg)); +++ match(reg); +++ op_cost(0); +++ format %{ "[$reg]" %} +++ interface(MEMORY_INTER) %{ +++ base($reg); +++ index(0xffffffff); +++ scale(0x0); +++ disp(0x0); +++ %} +++%} +++ +++operand indOffI(iRegP reg, immIOffset off) +++%{ +++ constraint(ALLOC_IN_RC(ptr_reg)); +++ match(AddP reg off); +++ op_cost(0); +++ format %{ "[$reg, $off]" %} +++ interface(MEMORY_INTER) %{ +++ base($reg); +++ index(0xffffffff); +++ scale(0x0); +++ disp($off); +++ %} +++%} +++ +++operand indOffL(iRegP reg, immLOffset off) +++%{ +++ constraint(ALLOC_IN_RC(ptr_reg)); +++ match(AddP reg off); +++ op_cost(0); +++ format %{ "[$reg, $off]" %} +++ interface(MEMORY_INTER) %{ +++ base($reg); +++ index(0xffffffff); +++ scale(0x0); +++ disp($off); +++ %} +++%} +++ +++operand indirectN(iRegN reg) +++%{ +++ predicate(CompressedOops::shift() == 0); +++ constraint(ALLOC_IN_RC(ptr_reg)); +++ match(DecodeN reg); +++ op_cost(0); +++ format %{ "[$reg]\t# narrow" %} +++ interface(MEMORY_INTER) %{ +++ base($reg); +++ index(0xffffffff); +++ scale(0x0); +++ disp(0x0); +++ %} +++%} +++ +++operand indOffIN(iRegN reg, immIOffset off) +++%{ +++ predicate(CompressedOops::shift() == 0); +++ constraint(ALLOC_IN_RC(ptr_reg)); +++ match(AddP (DecodeN reg) off); +++ op_cost(0); +++ format %{ "[$reg, $off]\t# narrow" %} +++ interface(MEMORY_INTER) %{ +++ base($reg); +++ index(0xffffffff); +++ scale(0x0); +++ disp($off); +++ %} +++%} +++ +++operand indOffLN(iRegN reg, immLOffset off) +++%{ +++ predicate(CompressedOops::shift() == 0); +++ constraint(ALLOC_IN_RC(ptr_reg)); +++ match(AddP (DecodeN reg) off); +++ op_cost(0); +++ format %{ "[$reg, $off]\t# narrow" %} +++ interface(MEMORY_INTER) %{ +++ base($reg); +++ index(0xffffffff); +++ scale(0x0); +++ disp($off); +++ %} +++%} +++ +++// RISCV opto stubs need to write to the pc slot in the thread anchor +++operand thread_anchor_pc(javaThread_RegP reg, immL_pc_off off) +++%{ +++ constraint(ALLOC_IN_RC(ptr_reg)); +++ match(AddP reg off); +++ op_cost(0); +++ format 
%{ "[$reg, $off]" %} +++ interface(MEMORY_INTER) %{ +++ base($reg); +++ index(0xffffffff); +++ scale(0x0); +++ disp($off); +++ %} +++%} +++ +++ +++//----------Special Memory Operands-------------------------------------------- +++// Stack Slot Operand - This operand is used for loading and storing temporary +++// values on the stack where a match requires a value to +++// flow through memory. +++operand stackSlotI(sRegI reg) +++%{ +++ constraint(ALLOC_IN_RC(stack_slots)); +++ // No match rule because this operand is only generated in matching +++ // match(RegI); +++ format %{ "[$reg]" %} +++ interface(MEMORY_INTER) %{ +++ base(0x02); // RSP +++ index(0xffffffff); // No Index +++ scale(0x0); // No Scale +++ disp($reg); // Stack Offset +++ %} +++%} +++ +++operand stackSlotF(sRegF reg) +++%{ +++ constraint(ALLOC_IN_RC(stack_slots)); +++ // No match rule because this operand is only generated in matching +++ // match(RegF); +++ format %{ "[$reg]" %} +++ interface(MEMORY_INTER) %{ +++ base(0x02); // RSP +++ index(0xffffffff); // No Index +++ scale(0x0); // No Scale +++ disp($reg); // Stack Offset +++ %} +++%} +++ +++operand stackSlotD(sRegD reg) +++%{ +++ constraint(ALLOC_IN_RC(stack_slots)); +++ // No match rule because this operand is only generated in matching +++ // match(RegD); +++ format %{ "[$reg]" %} +++ interface(MEMORY_INTER) %{ +++ base(0x02); // RSP +++ index(0xffffffff); // No Index +++ scale(0x0); // No Scale +++ disp($reg); // Stack Offset +++ %} +++%} +++ +++operand stackSlotL(sRegL reg) +++%{ +++ constraint(ALLOC_IN_RC(stack_slots)); +++ // No match rule because this operand is only generated in matching +++ // match(RegL); +++ format %{ "[$reg]" %} +++ interface(MEMORY_INTER) %{ +++ base(0x02); // RSP +++ index(0xffffffff); // No Index +++ scale(0x0); // No Scale +++ disp($reg); // Stack Offset +++ %} +++%} +++ +++// Special operand allowing long args to int ops to be truncated for free +++ +++operand iRegL2I(iRegL reg) %{ +++ +++ op_cost(0); +++ +++ match(ConvL2I reg); +++ +++ format %{ "l2i($reg)" %} +++ +++ interface(REG_INTER) +++%} +++ +++ +++// Comparison Operands +++// NOTE: Label is a predefined operand which should not be redefined in +++// the AD file. It is generically handled within the ADLC. +++ +++//----------Conditional Branch Operands---------------------------------------- +++// Comparison Op - This is the operation of the comparison, and is limited to +++// the following set of codes: +++// L (<), LE (<=), G (>), GE (>=), E (==), NE (!=) +++// +++// Other attributes of the comparison, such as unsignedness, are specified +++// by the comparison instruction that sets a condition code flags register. +++// That result is represented by a flags operand whose subtype is appropriate +++// to the unsignedness (etc.) of the comparison. +++// +++// Later, the instruction which matches both the Comparison Op (a Bool) and +++// the flags (produced by the Cmp) specifies the coding of the comparison op +++// by matching a specific subtype of Bool operand below, such as cmpOpU. 
+++
+++
+++// used for signed integral comparisons and fp comparisons
+++operand cmpOp()
+++%{
+++  match(Bool);
+++
+++  format %{ "" %}
+++
+++  // the values in interface derive from struct BoolTest::mask
+++  interface(COND_INTER) %{
+++    equal(0x0, "eq");
+++    greater(0x1, "gt");
+++    overflow(0x2, "overflow");
+++    less(0x3, "lt");
+++    not_equal(0x4, "ne");
+++    less_equal(0x5, "le");
+++    no_overflow(0x6, "no_overflow");
+++    greater_equal(0x7, "ge");
+++  %}
+++%}
+++
+++// used for unsigned integral comparisons
+++operand cmpOpU()
+++%{
+++  match(Bool);
+++
+++  format %{ "" %}
+++  // the values in interface derive from struct BoolTest::mask
+++  interface(COND_INTER) %{
+++    equal(0x0, "eq");
+++    greater(0x1, "gtu");
+++    overflow(0x2, "overflow");
+++    less(0x3, "ltu");
+++    not_equal(0x4, "ne");
+++    less_equal(0x5, "leu");
+++    no_overflow(0x6, "no_overflow");
+++    greater_equal(0x7, "geu");
+++  %}
+++%}
+++
+++// used for certain integral comparisons which can be
+++// converted to bxx instructions
+++operand cmpOpEqNe()
+++%{
+++  match(Bool);
+++  op_cost(0);
+++  predicate(n->as_Bool()->_test._test == BoolTest::ne ||
+++            n->as_Bool()->_test._test == BoolTest::eq);
+++
+++  format %{ "" %}
+++  interface(COND_INTER) %{
+++    equal(0x0, "eq");
+++    greater(0x1, "gt");
+++    overflow(0x2, "overflow");
+++    less(0x3, "lt");
+++    not_equal(0x4, "ne");
+++    less_equal(0x5, "le");
+++    no_overflow(0x6, "no_overflow");
+++    greater_equal(0x7, "ge");
+++  %}
+++%}
+++
+++operand cmpOpULtGe()
+++%{
+++  match(Bool);
+++  op_cost(0);
+++  predicate(n->as_Bool()->_test._test == BoolTest::lt ||
+++            n->as_Bool()->_test._test == BoolTest::ge);
+++
+++  format %{ "" %}
+++  interface(COND_INTER) %{
+++    equal(0x0, "eq");
+++    greater(0x1, "gtu");
+++    overflow(0x2, "overflow");
+++    less(0x3, "ltu");
+++    not_equal(0x4, "ne");
+++    less_equal(0x5, "leu");
+++    no_overflow(0x6, "no_overflow");
+++    greater_equal(0x7, "geu");
+++  %}
+++%}
+++
+++operand cmpOpUEqNeLeGt()
+++%{
+++  match(Bool);
+++  op_cost(0);
+++  predicate(n->as_Bool()->_test._test == BoolTest::ne ||
+++            n->as_Bool()->_test._test == BoolTest::eq ||
+++            n->as_Bool()->_test._test == BoolTest::le ||
+++            n->as_Bool()->_test._test == BoolTest::gt);
+++
+++  format %{ "" %}
+++  interface(COND_INTER) %{
+++    equal(0x0, "eq");
+++    greater(0x1, "gtu");
+++    overflow(0x2, "overflow");
+++    less(0x3, "ltu");
+++    not_equal(0x4, "ne");
+++    less_equal(0x5, "leu");
+++    no_overflow(0x6, "no_overflow");
+++    greater_equal(0x7, "geu");
+++  %}
+++%}
+++
+++
+++// Flags register, used as output of compare logic
+++operand rFlagsReg()
+++%{
+++  constraint(ALLOC_IN_RC(reg_flags));
+++  match(RegFlags);
+++
+++  op_cost(0);
+++  format %{ "RFLAGS" %}
+++  interface(REG_INTER);
+++%}
+++
+++// Special Registers
+++
+++// Method Register
+++operand inline_cache_RegP(iRegP reg)
+++%{
+++  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
+++  match(reg);
+++  match(iRegPNoSp);
+++  op_cost(0);
+++  format %{ %}
+++  interface(REG_INTER);
+++%}
+++
+++//----------OPERAND CLASSES----------------------------------------------------
+++// Operand Classes are groups of operands that are used to simplify
+++// instruction definitions by not requiring the AD writer to specify
+++// separate instructions for every form of operand when the
+++// instruction accepts multiple operand types with the same basic
+++// encoding and format. The classic case of this is memory operands.
+++
+++// memory is used to define read/write location for load/store
+++// instruction defs. we can turn a memory op into an Address
+++
+++opclass memory(indirect, indOffI, indOffL, indirectN, indOffIN, indOffLN);
+++
+++// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
+++// operations. it allows the src to be either an iRegI or a (ConvL2I
+++// iRegL). in the latter case the l2i normally planted for a ConvL2I
+++// can be elided because the 32-bit instruction will just employ the
+++// lower 32 bits anyway.
+++//
+++// n.b. this does not elide all L2I conversions. if the truncated
+++// value is consumed by more than one operation then the ConvL2I
+++// cannot be bundled into the consuming nodes so an l2i gets planted
+++// (actually an addiw $dst, $src, 0) and the downstream instructions
+++// consume the result of the L2I as an iRegI input. That's a shame since
+++// the addiw is actually redundant but it's not too costly.
+++
+++opclass iRegIorL2I(iRegI, iRegL2I);
+++opclass iRegIorL(iRegI, iRegL);
+++opclass iRegNorP(iRegN, iRegP);
+++opclass iRegILNP(iRegI, iRegL, iRegN, iRegP);
+++opclass iRegILNPNoSp(iRegINoSp, iRegLNoSp, iRegNNoSp, iRegPNoSp);
+++opclass immIorL(immI, immL);
+++
+++//----------PIPELINE-----------------------------------------------------------
+++// Rules which define the behavior of the target architecture's pipeline.
+++
+++// For specific pipelines, e.g. generic RISC-V, define the stages of that pipeline
+++//pipe_desc(ID, EX, MEM, WR);
+++#define ID S0
+++#define EX S1
+++#define MEM S2
+++#define WR S3
+++
+++// Integer ALU reg operation
+++pipeline %{
+++
+++attributes %{
+++  // RISC-V instructions are of fixed length
+++  fixed_size_instructions; // Fixed size instructions TODO does
+++  max_instructions_per_bundle = 2; // Generic RISC-V 1, Sifive Series 7 2
+++  // RISC-V instructions come in 32-bit word units
+++  instruction_unit_size = 4; // An instruction is 4 bytes long
+++  instruction_fetch_unit_size = 64; // The processor fetches one line
+++  instruction_fetch_units = 1; // of 64 bytes
+++
+++  // List of nop instructions
+++  nops( MachNop );
+++%}
+++
+++// We don't use an actual pipeline model so don't care about resources
+++// or description. we do use pipeline classes to introduce fixed
+++// latencies
+++
+++//----------RESOURCES----------------------------------------------------------
+++// Resources are the functional units available to the machine
+++
+++// Generic RISC-V pipeline
+++// 1 decoder
+++// 1 instruction decoded per cycle
+++// 1 load/store ops per cycle, 1 branch, 1 FPU
+++// 1 mul, 1 div
+++
+++resources ( DECODE,
+++            ALU,
+++            MUL,
+++            DIV,
+++            BRANCH,
+++            LDST,
+++            FPU);
+++
+++//----------PIPELINE DESCRIPTION-----------------------------------------------
+++// Pipeline Description specifies the stages in the machine's pipeline
+++
+++// Define the pipeline as a generic 6 stage pipeline
+++pipe_desc(S0, S1, S2, S3, S4, S5);
+++
+++//----------PIPELINE CLASSES---------------------------------------------------
+++// Pipeline Classes describe the stages in which input and output are
+++// referenced by the hardware pipeline.
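+++
+++// As a reading guide (not a new rule): in a class such as fp_dop_reg_reg_s
+++// below, "src1 : S1(read)" says src1 is read in pipeline stage S1, while
+++// "dst : S5(write)" says the result is produced in stage S5; the ADLC uses
+++// these stage annotations to derive the fixed instruction latencies
+++// mentioned above.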
+++ +++pipe_class fp_dop_reg_reg_s(fRegF dst, fRegF src1, fRegF src2) +++%{ +++ single_instruction; +++ src1 : S1(read); +++ src2 : S2(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_dop_reg_reg_d(fRegD dst, fRegD src1, fRegD src2) +++%{ +++ src1 : S1(read); +++ src2 : S2(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_uop_s(fRegF dst, fRegF src) +++%{ +++ single_instruction; +++ src : S1(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_uop_d(fRegD dst, fRegD src) +++%{ +++ single_instruction; +++ src : S1(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_d2f(fRegF dst, fRegD src) +++%{ +++ single_instruction; +++ src : S1(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_f2d(fRegD dst, fRegF src) +++%{ +++ single_instruction; +++ src : S1(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_f2i(iRegINoSp dst, fRegF src) +++%{ +++ single_instruction; +++ src : S1(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_f2l(iRegLNoSp dst, fRegF src) +++%{ +++ single_instruction; +++ src : S1(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_i2f(fRegF dst, iRegIorL2I src) +++%{ +++ single_instruction; +++ src : S1(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_l2f(fRegF dst, iRegL src) +++%{ +++ single_instruction; +++ src : S1(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_d2i(iRegINoSp dst, fRegD src) +++%{ +++ single_instruction; +++ src : S1(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_d2l(iRegLNoSp dst, fRegD src) +++%{ +++ single_instruction; +++ src : S1(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_i2d(fRegD dst, iRegIorL2I src) +++%{ +++ single_instruction; +++ src : S1(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_l2d(fRegD dst, iRegIorL2I src) +++%{ +++ single_instruction; +++ src : S1(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_div_s(fRegF dst, fRegF src1, fRegF src2) +++%{ +++ single_instruction; +++ src1 : S1(read); +++ src2 : S2(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_div_d(fRegD dst, fRegD src1, fRegD src2) +++%{ +++ single_instruction; +++ src1 : S1(read); +++ src2 : S2(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_sqrt_s(fRegF dst, fRegF src1, fRegF src2) +++%{ +++ single_instruction; +++ src1 : S1(read); +++ src2 : S2(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_sqrt_d(fRegD dst, fRegD src1, fRegD src2) +++%{ +++ single_instruction; +++ src1 : S1(read); +++ src2 : S2(read); +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_load_constant_s(fRegF dst) +++%{ +++ single_instruction; +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_load_constant_d(fRegD dst) +++%{ +++ single_instruction; +++ dst : S5(write); +++ DECODE : ID; +++ FPU : S5; +++%} +++ +++pipe_class fp_load_mem_s(fRegF dst, memory mem) +++%{ +++ single_instruction; +++ mem : S1(read); +++ dst : S5(write); +++ DECODE : ID; +++ LDST : MEM; +++%} +++ +++pipe_class fp_load_mem_d(fRegD dst, memory mem) +++%{ +++ 
single_instruction;
+++  mem : S1(read);
+++  dst : S5(write);
+++  DECODE : ID;
+++  LDST : MEM;
+++%}
+++
+++pipe_class fp_store_reg_s(fRegF src, memory mem)
+++%{
+++  single_instruction;
+++  src : S1(read);
+++  mem : S5(write);
+++  DECODE : ID;
+++  LDST : MEM;
+++%}
+++
+++pipe_class fp_store_reg_d(fRegD src, memory mem)
+++%{
+++  single_instruction;
+++  src : S1(read);
+++  mem : S5(write);
+++  DECODE : ID;
+++  LDST : MEM;
+++%}
+++
+++//------- Integer ALU operations --------------------------
+++
+++// Integer ALU reg-reg operation
+++// Operands needed in ID, result generated in EX
+++// E.g. ADD Rd, Rs1, Rs2
+++pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
+++%{
+++  single_instruction;
+++  dst : EX(write);
+++  src1 : ID(read);
+++  src2 : ID(read);
+++  DECODE : ID;
+++  ALU : EX;
+++%}
+++
+++// Integer ALU reg operation with constant shift
+++// E.g. SLLI Rd, Rs1, #shift
+++pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
+++%{
+++  single_instruction;
+++  dst : EX(write);
+++  src1 : ID(read);
+++  DECODE : ID;
+++  ALU : EX;
+++%}
+++
+++// Integer ALU reg-reg operation with variable shift
+++// both operands must be available in ID
+++// E.g. SLL Rd, Rs1, Rs2
+++pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
+++%{
+++  single_instruction;
+++  dst : EX(write);
+++  src1 : ID(read);
+++  src2 : ID(read);
+++  DECODE : ID;
+++  ALU : EX;
+++%}
+++
+++// Integer ALU reg operation
+++// E.g. NEG Rd, Rs2
+++pipe_class ialu_reg(iRegI dst, iRegI src)
+++%{
+++  single_instruction;
+++  dst : EX(write);
+++  src : ID(read);
+++  DECODE : ID;
+++  ALU : EX;
+++%}
+++
+++// Integer ALU reg immediate operation
+++// E.g. ADDI Rd, Rs1, #imm
+++pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
+++%{
+++  single_instruction;
+++  dst : EX(write);
+++  src1 : ID(read);
+++  DECODE : ID;
+++  ALU : EX;
+++%}
+++
+++// Integer ALU immediate operation (no source operands)
+++// E.g. LI Rd, #imm
+++pipe_class ialu_imm(iRegI dst)
+++%{
+++  single_instruction;
+++  dst : EX(write);
+++  DECODE : ID;
+++  ALU : EX;
+++%}
+++
+++//------- Multiply pipeline operations --------------------
+++
+++// Multiply reg-reg
+++// E.g. MULW Rd, Rs1, Rs2
+++pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
+++%{
+++  single_instruction;
+++  dst : WR(write);
+++  src1 : ID(read);
+++  src2 : ID(read);
+++  DECODE : ID;
+++  MUL : WR;
+++%}
+++
+++// E.g. MUL RD, Rs1, Rs2
+++pipe_class lmul_reg_reg(iRegL dst, iRegL src1, iRegL src2)
+++%{
+++  single_instruction;
+++  fixed_latency(3); // Maximum latency for 64 bit mul
+++  dst : WR(write);
+++  src1 : ID(read);
+++  src2 : ID(read);
+++  DECODE : ID;
+++  MUL : WR;
+++%}
+++
+++//------- Divide pipeline operations --------------------
+++
+++// E.g. DIVW Rd, Rs1, Rs2
+++pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
+++%{
+++  single_instruction;
+++  fixed_latency(8); // Maximum latency for 32 bit divide
+++  dst : WR(write);
+++  src1 : ID(read);
+++  src2 : ID(read);
+++  DECODE : ID;
+++  DIV : WR;
+++%}
+++
+++// E.g. DIV RD, Rs1, Rs2
+++pipe_class ldiv_reg_reg(iRegL dst, iRegL src1, iRegL src2)
+++%{
+++  single_instruction;
+++  fixed_latency(16); // Maximum latency for 64 bit divide
+++  dst : WR(write);
+++  src1 : ID(read);
+++  src2 : ID(read);
+++  DECODE : ID;
+++  DIV : WR;
+++%}
+++
+++//------- Load pipeline operations ------------------------
+++
+++// Load - reg, mem
+++// E.g.
LA Rd, mem +++pipe_class iload_reg_mem(iRegI dst, memory mem) +++%{ +++ single_instruction; +++ dst : WR(write); +++ mem : ID(read); +++ DECODE : ID; +++ LDST : MEM; +++%} +++ +++// Load - reg, reg +++// E.g. LD Rd, Rs +++pipe_class iload_reg_reg(iRegI dst, iRegI src) +++%{ +++ single_instruction; +++ dst : WR(write); +++ src : ID(read); +++ DECODE : ID; +++ LDST : MEM; +++%} +++ +++//------- Store pipeline operations ----------------------- +++ +++// Store - zr, mem +++// E.g. SD zr, mem +++pipe_class istore_mem(memory mem) +++%{ +++ single_instruction; +++ mem : ID(read); +++ DECODE : ID; +++ LDST : MEM; +++%} +++ +++// Store - reg, mem +++// E.g. SD Rs, mem +++pipe_class istore_reg_mem(iRegI src, memory mem) +++%{ +++ single_instruction; +++ mem : ID(read); +++ src : EX(read); +++ DECODE : ID; +++ LDST : MEM; +++%} +++ +++// Store - reg, reg +++// E.g. SD Rs2, Rs1 +++pipe_class istore_reg_reg(iRegI dst, iRegI src) +++%{ +++ single_instruction; +++ dst : ID(read); +++ src : EX(read); +++ DECODE : ID; +++ LDST : MEM; +++%} +++ +++//------- Control transfer pipeline operations ------------ +++ +++// Branch +++pipe_class pipe_branch() +++%{ +++ single_instruction; +++ DECODE : ID; +++ BRANCH : EX; +++%} +++ +++// Branch +++pipe_class pipe_branch_reg(iRegI src) +++%{ +++ single_instruction; +++ src : ID(read); +++ DECODE : ID; +++ BRANCH : EX; +++%} +++ +++// Compare & Branch +++// E.g. BEQ Rs1, Rs2, L +++pipe_class pipe_cmp_branch(iRegI src1, iRegI src2) +++%{ +++ single_instruction; +++ src1 : ID(read); +++ src2 : ID(read); +++ DECODE : ID; +++ BRANCH : EX; +++%} +++ +++// E.g. BEQZ Rs, L +++pipe_class pipe_cmpz_branch(iRegI src) +++%{ +++ single_instruction; +++ src : ID(read); +++ DECODE : ID; +++ BRANCH : EX; +++%} +++ +++//------- Synchronisation operations ---------------------- +++// Any operation requiring serialization +++// E.g. FENCE/Atomic Ops/Load Acquire/Store Release +++pipe_class pipe_serial() +++%{ +++ single_instruction; +++ force_serialization; +++ fixed_latency(16); +++ DECODE : ID; +++ LDST : MEM; +++%} +++ +++pipe_class pipe_slow() +++%{ +++ instruction_count(10); +++ multiple_bundles; +++ force_serialization; +++ fixed_latency(16); +++ DECODE : ID; +++ LDST : MEM; +++%} +++ +++// Empty pipeline class +++pipe_class pipe_class_empty() +++%{ +++ single_instruction; +++ fixed_latency(0); +++%} +++ +++// Default pipeline class. +++pipe_class pipe_class_default() +++%{ +++ single_instruction; +++ fixed_latency(2); +++%} +++ +++// Pipeline class for compares. +++pipe_class pipe_class_compare() +++%{ +++ single_instruction; +++ fixed_latency(16); +++%} +++ +++// Pipeline class for memory operations. +++pipe_class pipe_class_memory() +++%{ +++ single_instruction; +++ fixed_latency(16); +++%} +++ +++// Pipeline class for call. +++pipe_class pipe_class_call() +++%{ +++ single_instruction; +++ fixed_latency(100); +++%} +++ +++// Define the class for the Nop node. +++define %{ +++ MachNop = pipe_class_empty; +++%} +++%} +++//----------INSTRUCTIONS------------------------------------------------------- +++// +++// match -- States which machine-independent subtree may be replaced +++// by this instruction. +++// ins_cost -- The estimated cost of this instruction is used by instruction +++// selection to identify a minimum cost tree of machine +++// instructions that matches a tree of machine-independent +++// instructions. +++// format -- A string providing the disassembly for this instruction. 
+++// The value of an instruction's operand may be inserted
+++// by referring to it with a '$' prefix.
+++// opcode -- Three instruction opcodes may be provided. These are referred
+++// to within an encode class as $primary, $secondary, and $tertiary
+++// respectively. The primary opcode is commonly used to
+++// indicate the type of machine instruction, while secondary
+++// and tertiary are often used for prefix options or addressing
+++// modes.
+++// ins_encode -- A list of encode classes with parameters. The encode class
+++// name must have been defined in an 'enc_class' specification
+++// in the encode section of the architecture description.
+++
+++// ============================================================================
+++// Memory (Load/Store) Instructions
+++
+++// Load Instructions
+++
+++// Load Byte (8 bit signed)
+++instruct loadB(iRegINoSp dst, memory mem)
+++%{
+++  match(Set dst (LoadB mem));
+++
+++  ins_cost(LOAD_COST);
+++  format %{ "lb $dst, $mem\t# byte, #@loadB" %}
+++
+++  ins_encode %{
+++    __ lb(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
+++  %}
+++
+++  ins_pipe(iload_reg_mem);
+++%}
+++
+++// Load Byte (8 bit signed) into long
+++instruct loadB2L(iRegLNoSp dst, memory mem)
+++%{
+++  match(Set dst (ConvI2L (LoadB mem)));
+++
+++  ins_cost(LOAD_COST);
+++  format %{ "lb $dst, $mem\t# byte, #@loadB2L" %}
+++
+++  ins_encode %{
+++    __ lb(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
+++  %}
+++
+++  ins_pipe(iload_reg_mem);
+++%}
+++
+++// Load Byte (8 bit unsigned)
+++instruct loadUB(iRegINoSp dst, memory mem)
+++%{
+++  match(Set dst (LoadUB mem));
+++
+++  ins_cost(LOAD_COST);
+++  format %{ "lbu $dst, $mem\t# byte, #@loadUB" %}
+++
+++  ins_encode %{
+++    __ lbu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
+++  %}
+++
+++  ins_pipe(iload_reg_mem);
+++%}
+++
+++// Load Byte (8 bit unsigned) into long
+++instruct loadUB2L(iRegLNoSp dst, memory mem)
+++%{
+++  match(Set dst (ConvI2L (LoadUB mem)));
+++
+++  ins_cost(LOAD_COST);
+++  format %{ "lbu $dst, $mem\t# byte, #@loadUB2L" %}
+++
+++  ins_encode %{
+++    __ lbu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
+++  %}
+++
+++  ins_pipe(iload_reg_mem);
+++%}
+++
+++// Load Short (16 bit signed)
+++instruct loadS(iRegINoSp dst, memory mem)
+++%{
+++  match(Set dst (LoadS mem));
+++
+++  ins_cost(LOAD_COST);
+++  format %{ "lh $dst, $mem\t# short, #@loadS" %}
+++
+++  ins_encode %{
+++    __ lh(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
+++  %}
+++
+++  ins_pipe(iload_reg_mem);
+++%}
+++
+++// Load Short (16 bit signed) into long
+++instruct loadS2L(iRegLNoSp dst, memory mem)
+++%{
+++  match(Set dst (ConvI2L (LoadS mem)));
+++
+++  ins_cost(LOAD_COST);
+++  format %{ "lh $dst, $mem\t# short, #@loadS2L" %}
+++
+++  ins_encode %{
+++    __ lh(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
+++  %}
+++
+++  ins_pipe(iload_reg_mem);
+++%}
+++
+++// Load Char (16 bit unsigned)
+++instruct loadUS(iRegINoSp dst, memory mem)
+++%{
+++  match(Set dst (LoadUS mem));
+++
+++  ins_cost(LOAD_COST);
+++  format %{ "lhu $dst, $mem\t# short, #@loadUS" %}
+++
+++  ins_encode %{
+++    __ lhu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
+++  %}
+++
+++  ins_pipe(iload_reg_mem);
+++%}
+++
+++// Load Short/Char (16 bit unsigned) into long
+++instruct loadUS2L(iRegLNoSp dst, memory mem)
+++%{
+++  match(Set dst (ConvI2L (LoadUS mem)));
+++
+++  ins_cost(LOAD_COST);
+++  format %{ "lhu $dst, $mem\t#
short, #@loadUS2L" %} +++ +++ ins_encode %{ +++ __ lhu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(iload_reg_mem); +++%} +++ +++// Load Integer (32 bit signed) +++instruct loadI(iRegINoSp dst, memory mem) +++%{ +++ match(Set dst (LoadI mem)); +++ +++ ins_cost(LOAD_COST); +++ format %{ "lw $dst, $mem\t# int, #@loadI" %} +++ +++ ins_encode %{ +++ __ lw(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(iload_reg_mem); +++%} +++ +++// Load Integer (32 bit signed) into long +++instruct loadI2L(iRegLNoSp dst, memory mem) +++%{ +++ match(Set dst (ConvI2L (LoadI mem))); +++ +++ ins_cost(LOAD_COST); +++ format %{ "lw $dst, $mem\t# int, #@loadI2L" %} +++ +++ ins_encode %{ +++ __ lw(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(iload_reg_mem); +++%} +++ +++// Load Integer (32 bit unsigned) into long +++instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask) +++%{ +++ match(Set dst (AndL (ConvI2L (LoadI mem)) mask)); +++ +++ ins_cost(LOAD_COST); +++ format %{ "lwu $dst, $mem\t# int, #@loadUI2L" %} +++ +++ ins_encode %{ +++ __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(iload_reg_mem); +++%} +++ +++// Load Long (64 bit signed) +++instruct loadL(iRegLNoSp dst, memory mem) +++%{ +++ match(Set dst (LoadL mem)); +++ +++ ins_cost(LOAD_COST); +++ format %{ "ld $dst, $mem\t# int, #@loadL" %} +++ +++ ins_encode %{ +++ __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(iload_reg_mem); +++%} +++ +++// Load Range +++instruct loadRange(iRegINoSp dst, memory mem) +++%{ +++ match(Set dst (LoadRange mem)); +++ +++ ins_cost(LOAD_COST); +++ format %{ "lwu $dst, $mem\t# range, #@loadRange" %} +++ +++ ins_encode %{ +++ __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(iload_reg_mem); +++%} +++ +++// Load Pointer +++instruct loadP(iRegPNoSp dst, memory mem) +++%{ +++ match(Set dst (LoadP mem)); +++ predicate(n->as_Load()->barrier_data() == 0); +++ +++ ins_cost(LOAD_COST); +++ format %{ "ld $dst, $mem\t# ptr, #@loadP" %} +++ +++ ins_encode %{ +++ __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(iload_reg_mem); +++%} +++ +++// Load Compressed Pointer +++instruct loadN(iRegNNoSp dst, memory mem) +++%{ +++ match(Set dst (LoadN mem)); +++ +++ ins_cost(LOAD_COST); +++ format %{ "lwu $dst, $mem\t# loadN, compressed ptr, #@loadN" %} +++ +++ ins_encode %{ +++ __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(iload_reg_mem); +++%} +++ +++// Load Klass Pointer +++instruct loadKlass(iRegPNoSp dst, memory mem) +++%{ +++ match(Set dst (LoadKlass mem)); +++ +++ ins_cost(LOAD_COST); +++ format %{ "ld $dst, $mem\t# class, #@loadKlass" %} +++ +++ ins_encode %{ +++ __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(iload_reg_mem); +++%} +++ +++// Load Narrow Klass Pointer +++instruct loadNKlass(iRegNNoSp dst, memory mem) +++%{ +++ match(Set dst (LoadNKlass mem)); +++ +++ ins_cost(LOAD_COST); +++ format %{ "lwu $dst, $mem\t# loadNKlass, compressed class ptr, #@loadNKlass" %} +++ +++ ins_encode %{ +++ __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(iload_reg_mem); +++%} +++ +++// Load Float +++instruct loadF(fRegF dst, memory mem) 
+++%{ +++ match(Set dst (LoadF mem)); +++ +++ ins_cost(LOAD_COST); +++ format %{ "flw $dst, $mem\t# float, #@loadF" %} +++ +++ ins_encode %{ +++ __ flw(as_FloatRegister($dst$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(fp_load_mem_s); +++%} +++ +++// Load Double +++instruct loadD(fRegD dst, memory mem) +++%{ +++ match(Set dst (LoadD mem)); +++ +++ ins_cost(LOAD_COST); +++ format %{ "fld $dst, $mem\t# double, #@loadD" %} +++ +++ ins_encode %{ +++ __ fld(as_FloatRegister($dst$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(fp_load_mem_d); +++%} +++ +++// Load Int Constant +++instruct loadConI(iRegINoSp dst, immI src) +++%{ +++ match(Set dst src); +++ +++ ins_cost(ALU_COST); +++ format %{ "li $dst, $src\t# int, #@loadConI" %} +++ +++ ins_encode(riscv_enc_li_imm(dst, src)); +++ +++ ins_pipe(ialu_imm); +++%} +++ +++// Load Long Constant +++instruct loadConL(iRegLNoSp dst, immL src) +++%{ +++ match(Set dst src); +++ +++ ins_cost(ALU_COST); +++ format %{ "li $dst, $src\t# long, #@loadConL" %} +++ +++ ins_encode(riscv_enc_li_imm(dst, src)); +++ +++ ins_pipe(ialu_imm); +++%} +++ +++// Load Pointer Constant +++instruct loadConP(iRegPNoSp dst, immP con) +++%{ +++ match(Set dst con); +++ +++ ins_cost(ALU_COST); +++ format %{ "mv $dst, $con\t# ptr, #@loadConP" %} +++ +++ ins_encode(riscv_enc_mov_p(dst, con)); +++ +++ ins_pipe(ialu_imm); +++%} +++ +++// Load Null Pointer Constant +++instruct loadConP0(iRegPNoSp dst, immP0 con) +++%{ +++ match(Set dst con); +++ +++ ins_cost(ALU_COST); +++ format %{ "mv $dst, $con\t# NULL ptr, #@loadConP0" %} +++ +++ ins_encode(riscv_enc_mov_zero(dst)); +++ +++ ins_pipe(ialu_imm); +++%} +++ +++// Load Pointer Constant One +++instruct loadConP1(iRegPNoSp dst, immP_1 con) +++%{ +++ match(Set dst con); +++ +++ ins_cost(ALU_COST); +++ format %{ "mv $dst, $con\t# load ptr constant one, #@loadConP1" %} +++ +++ ins_encode(riscv_enc_mov_p1(dst)); +++ +++ ins_pipe(ialu_imm); +++%} +++ +++// Load Byte Map Base Constant +++instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con) +++%{ +++ match(Set dst con); +++ ins_cost(ALU_COST); +++ format %{ "mv $dst, $con\t# Byte Map Base, #@loadByteMapBase" %} +++ +++ ins_encode(riscv_enc_mov_byte_map_base(dst)); +++ +++ ins_pipe(ialu_imm); +++%} +++ +++// Load Narrow Pointer Constant +++instruct loadConN(iRegNNoSp dst, immN con) +++%{ +++ match(Set dst con); +++ +++ ins_cost(ALU_COST * 4); +++ format %{ "mv $dst, $con\t# compressed ptr, #@loadConN" %} +++ +++ ins_encode(riscv_enc_mov_n(dst, con)); +++ +++ ins_pipe(ialu_imm); +++%} +++ +++// Load Narrow Null Pointer Constant +++instruct loadConN0(iRegNNoSp dst, immN0 con) +++%{ +++ match(Set dst con); +++ +++ ins_cost(ALU_COST); +++ format %{ "mv $dst, $con\t# compressed NULL ptr, #@loadConN0" %} +++ +++ ins_encode(riscv_enc_mov_zero(dst)); +++ +++ ins_pipe(ialu_imm); +++%} +++ +++// Load Narrow Klass Constant +++instruct loadConNKlass(iRegNNoSp dst, immNKlass con) +++%{ +++ match(Set dst con); +++ +++ ins_cost(ALU_COST * 6); +++ format %{ "mv $dst, $con\t# compressed klass ptr, #@loadConNKlass" %} +++ +++ ins_encode(riscv_enc_mov_nk(dst, con)); +++ +++ ins_pipe(ialu_imm); +++%} +++ +++// Load Float Constant +++instruct loadConF(fRegF dst, immF con) %{ +++ match(Set dst con); +++ +++ ins_cost(LOAD_COST); +++ format %{ +++ "flw $dst, [$constantaddress]\t# load from constant table: float=$con, #@loadConF" +++ %} +++ +++ ins_encode %{ +++ __ flw(as_FloatRegister($dst$$reg), $constantaddress($con)); +++ %} +++ +++ 
ins_pipe(fp_load_constant_s); +++%} +++ +++instruct loadConF0(fRegF dst, immF0 con) %{ +++ match(Set dst con); +++ +++ ins_cost(XFER_COST); +++ +++ format %{ "fmv.w.x $dst, zr\t# float, #@loadConF0" %} +++ +++ ins_encode %{ +++ __ fmv_w_x(as_FloatRegister($dst$$reg), zr); +++ %} +++ +++ ins_pipe(fp_load_constant_s); +++%} +++ +++// Load Double Constant +++instruct loadConD(fRegD dst, immD con) %{ +++ match(Set dst con); +++ +++ ins_cost(LOAD_COST); +++ format %{ +++ "fld $dst, [$constantaddress]\t# load from constant table: double=$con, #@loadConD" +++ %} +++ +++ ins_encode %{ +++ __ fld(as_FloatRegister($dst$$reg), $constantaddress($con)); +++ %} +++ +++ ins_pipe(fp_load_constant_d); +++%} +++ +++instruct loadConD0(fRegD dst, immD0 con) %{ +++ match(Set dst con); +++ +++ ins_cost(XFER_COST); +++ +++ format %{ "fmv.d.x $dst, zr\t# double, #@loadConD0" %} +++ +++ ins_encode %{ +++ __ fmv_d_x(as_FloatRegister($dst$$reg), zr); +++ %} +++ +++ ins_pipe(fp_load_constant_d); +++%} +++ +++// Store Instructions +++// Store CMS card-mark Immediate +++instruct storeimmCM0(immI0 zero, memory mem) +++%{ +++ match(Set mem (StoreCM mem zero)); +++ +++ ins_cost(STORE_COST); +++ format %{ "storestore (elided)\n\t" +++ "sb zr, $mem\t# byte, #@storeimmCM0" %} +++ +++ ins_encode %{ +++ __ sb(zr, Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(istore_mem); +++%} +++ +++// Store CMS card-mark Immediate with intervening StoreStore +++// needed when using CMS with no conditional card marking +++instruct storeimmCM0_ordered(immI0 zero, memory mem) +++%{ +++ match(Set mem (StoreCM mem zero)); +++ +++ ins_cost(ALU_COST + STORE_COST); +++ format %{ "membar(StoreStore)\n\t" +++ "sb zr, $mem\t# byte, #@storeimmCM0_ordered" %} +++ +++ ins_encode %{ +++ __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); +++ __ sb(zr, Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(istore_mem); +++%} +++ +++// Store Byte +++instruct storeB(iRegIorL2I src, memory mem) +++%{ +++ match(Set mem (StoreB mem src)); +++ +++ ins_cost(STORE_COST); +++ format %{ "sb $src, $mem\t# byte, #@storeB" %} +++ +++ ins_encode %{ +++ __ sb(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(istore_reg_mem); +++%} +++ +++instruct storeimmB0(immI0 zero, memory mem) +++%{ +++ match(Set mem (StoreB mem zero)); +++ +++ ins_cost(STORE_COST); +++ format %{ "sb zr, $mem\t# byte, #@storeimmB0" %} +++ +++ ins_encode %{ +++ __ sb(zr, Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(istore_mem); +++%} +++ +++// Store Char/Short +++instruct storeC(iRegIorL2I src, memory mem) +++%{ +++ match(Set mem (StoreC mem src)); +++ +++ ins_cost(STORE_COST); +++ format %{ "sh $src, $mem\t# short, #@storeC" %} +++ +++ ins_encode %{ +++ __ sh(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(istore_reg_mem); +++%} +++ +++instruct storeimmC0(immI0 zero, memory mem) +++%{ +++ match(Set mem (StoreC mem zero)); +++ +++ ins_cost(STORE_COST); +++ format %{ "sh zr, $mem\t# short, #@storeimmC0" %} +++ +++ ins_encode %{ +++ __ sh(zr, Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(istore_mem); +++%} +++ +++// Store Integer +++instruct storeI(iRegIorL2I src, memory mem) +++%{ +++ match(Set mem(StoreI mem src)); +++ +++ ins_cost(STORE_COST); +++ format %{ "sw $src, $mem\t# int, #@storeI" %} +++ +++ ins_encode %{ +++ __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ 
+++ ins_pipe(istore_reg_mem); +++%} +++ +++instruct storeimmI0(immI0 zero, memory mem) +++%{ +++ match(Set mem(StoreI mem zero)); +++ +++ ins_cost(STORE_COST); +++ format %{ "sw zr, $mem\t# int, #@storeimmI0" %} +++ +++ ins_encode %{ +++ __ sw(zr, Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(istore_mem); +++%} +++ +++// Store Long (64 bit signed) +++instruct storeL(iRegL src, memory mem) +++%{ +++ match(Set mem (StoreL mem src)); +++ +++ ins_cost(STORE_COST); +++ format %{ "sd $src, $mem\t# long, #@storeL" %} +++ +++ ins_encode %{ +++ __ sd(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(istore_reg_mem); +++%} +++ +++// Store Long (64 bit signed) +++instruct storeimmL0(immL0 zero, memory mem) +++%{ +++ match(Set mem (StoreL mem zero)); +++ +++ ins_cost(STORE_COST); +++ format %{ "sd zr, $mem\t# long, #@storeimmL0" %} +++ +++ ins_encode %{ +++ __ sd(zr, Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(istore_mem); +++%} +++ +++// Store Pointer +++instruct storeP(iRegP src, memory mem) +++%{ +++ match(Set mem (StoreP mem src)); +++ +++ ins_cost(STORE_COST); +++ format %{ "sd $src, $mem\t# ptr, #@storeP" %} +++ +++ ins_encode %{ +++ __ sd(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(istore_reg_mem); +++%} +++ +++// Store Pointer +++instruct storeimmP0(immP0 zero, memory mem) +++%{ +++ match(Set mem (StoreP mem zero)); +++ +++ ins_cost(STORE_COST); +++ format %{ "sd zr, $mem\t# ptr, #@storeimmP0" %} +++ +++ ins_encode %{ +++ __ sd(zr, Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(istore_mem); +++%} +++ +++// Store Compressed Pointer +++instruct storeN(iRegN src, memory mem) +++%{ +++ match(Set mem (StoreN mem src)); +++ +++ ins_cost(STORE_COST); +++ format %{ "sw $src, $mem\t# compressed ptr, #@storeN" %} +++ +++ ins_encode %{ +++ __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(istore_reg_mem); +++%} +++ +++instruct storeImmN0(immN0 zero, memory mem) +++%{ +++ match(Set mem (StoreN mem zero)); +++ +++ ins_cost(STORE_COST); +++ format %{ "sw zr, $mem\t# compressed ptr, #@storeImmN0" %} +++ +++ ins_encode %{ +++ __ sw(zr, Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(istore_reg_mem); +++%} +++ +++// Store Float +++instruct storeF(fRegF src, memory mem) +++%{ +++ match(Set mem (StoreF mem src)); +++ +++ ins_cost(STORE_COST); +++ format %{ "fsw $src, $mem\t# float, #@storeF" %} +++ +++ ins_encode %{ +++ __ fsw(as_FloatRegister($src$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(fp_store_reg_s); +++%} +++ +++// Store Double +++instruct storeD(fRegD src, memory mem) +++%{ +++ match(Set mem (StoreD mem src)); +++ +++ ins_cost(STORE_COST); +++ format %{ "fsd $src, $mem\t# double, #@storeD" %} +++ +++ ins_encode %{ +++ __ fsd(as_FloatRegister($src$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(fp_store_reg_d); +++%} +++ +++// Store Compressed Klass Pointer +++instruct storeNKlass(iRegN src, memory mem) +++%{ +++ match(Set mem (StoreNKlass mem src)); +++ +++ ins_cost(STORE_COST); +++ format %{ "sw $src, $mem\t# compressed klass ptr, #@storeNKlass" %} +++ +++ ins_encode %{ +++ __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp)); +++ %} +++ +++ ins_pipe(istore_reg_mem); +++%} +++ +++// ============================================================================ +++// Atomic 
operation instructions
+++//
+++// Intel and SPARC both implement Ideal Node LoadPLocked and
+++// Store{PIL}Conditional instructions using a normal load for the
+++// LoadPLocked and a CAS for the Store{PIL}Conditional.
+++//
+++// The ideal code appears only to use LoadPLocked/storePConditional as a
+++// pair to lock object allocations from Eden space when not using
+++// TLABs.
+++//
+++// There does not appear to be a Load{IL}Locked Ideal Node and the
+++// Ideal code appears to use Store{IL}Conditional as an alias for CAS
+++// and to use StoreIConditional only for 32-bit and StoreLConditional
+++// only for 64-bit.
+++//
+++// We implement LoadPLocked and storePConditional using, respectively,
+++// the RISC-V hardware load-reserve and store-conditional instructions,
+++// whereas we implement each of Store{IL}Conditional using a CAS, which
+++// employs a pair of instructions comprising a load-reserve followed by
+++// a store-conditional.
+++
+++
+++// Locked-load (load reserved) of the current heap-top
+++// used when updating the eden heap top
+++// implemented using lr_d on RISCV64
+++instruct loadPLocked(iRegPNoSp dst, indirect mem)
+++%{
+++  match(Set dst (LoadPLocked mem));
+++
+++  ins_cost(ALU_COST * 2 + LOAD_COST);
+++
+++  format %{ "lr.d $dst, $mem\t# ptr load reserved, #@loadPLocked" %}
+++
+++  ins_encode %{
+++    __ la(t0, Address(as_Register($mem$$base), $mem$$disp));
+++    __ lr_d($dst$$Register, t0, Assembler::aq);
+++  %}
+++
+++  ins_pipe(pipe_serial);
+++%}
+++
+++// Conditional-store of the updated heap-top.
+++// Used during allocation of the shared heap.
+++// implemented using sc_d on RISCV64.
+++instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
+++%{
+++  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
+++
+++  ins_cost(ALU_COST * 2 + STORE_COST);
+++
+++  format %{
+++    "sc_d t1, $newval, $heap_top_ptr\t# ptr store conditional, #@storePConditional"
+++  %}
+++
+++  ins_encode %{
+++    __ la(t0, Address(as_Register($heap_top_ptr$$base), $heap_top_ptr$$disp));
+++    __ sc_d($cr$$Register, $newval$$Register, t0, Assembler::rl);
+++  %}
+++
+++  ins_pipe(pipe_serial);
+++%}
+++
+++instruct storeLConditional(indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr)
+++%{
+++  match(Set cr (StoreLConditional mem (Binary oldval newval)));
+++
+++  ins_cost(LOAD_COST + STORE_COST + 2 * BRANCH_COST);
+++
+++  format %{
+++    "cmpxchg t1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
+++    "xorr $cr, $cr, $oldval\t# $cr == 0 on successful write, #@storeLConditional"
+++  %}
+++
+++  ins_encode %{
+++    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
+++               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $cr$$Register);
+++    __ xorr($cr$$Register, $cr$$Register, $oldval$$Register);
+++  %}
+++
+++  ins_pipe(pipe_slow);
+++%}
+++
+++// storeIConditional also has acquire semantics, for no better reason
+++// than matching storeLConditional.
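+++//
+++// For orientation, the retry loop that such a load-reserve/
+++// store-conditional CAS expands to has roughly this shape (an
+++// illustrative sketch of the code shape only; the exact sequence
+++// emitted by MacroAssembler::cmpxchg may differ):
+++//
+++//   loop:
+++//     lr.d  t0, (addr)          # load-reserve the current value
+++//     bne   t0, oldval, done    # value differs => no store
+++//     sc.d  t1, newval, (addr)  # store-conditional; t1 == 0 on success
+++//     bnez  t1, loop            # reservation lost => retry
+++//   done: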
+++instruct storeIConditional(indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) +++%{ +++ match(Set cr (StoreIConditional mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2); +++ +++ format %{ +++ "cmpxchgw t1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval" +++ "xorr $cr, $cr, $oldval\t# $cr == 0 on successful write, #@storeIConditional" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $cr$$Register); +++ __ xorr($cr$$Register,$cr$$Register, $oldval$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++// standard CompareAndSwapX when we are using barriers +++// these have higher priority than the rules selected by a predicate +++instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, +++ iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) +++%{ +++ match(Set res (CompareAndSwapB mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + ALU_COST * 10 + BRANCH_COST * 4); +++ +++ effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr); +++ +++ format %{ +++ "cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t" +++ "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapB" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8, +++ Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register, +++ true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, +++ iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) +++%{ +++ match(Set res (CompareAndSwapS mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + ALU_COST * 11 + BRANCH_COST * 4); +++ +++ effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr); +++ +++ format %{ +++ "cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t" +++ "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapS" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16, +++ Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register, +++ true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval) +++%{ +++ match(Set res (CompareAndSwapI mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4); +++ +++ format %{ +++ "cmpxchg $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t" +++ "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 
1 : 0), #@compareAndSwapI" +++ %} +++ +++ ins_encode(riscv_enc_cmpxchgw(res, mem, oldval, newval)); +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval) +++%{ +++ match(Set res (CompareAndSwapL mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4); +++ +++ format %{ +++ "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t" +++ "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapL" +++ %} +++ +++ ins_encode(riscv_enc_cmpxchg(res, mem, oldval, newval)); +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval) +++%{ +++ predicate(n->as_LoadStore()->barrier_data() == 0); +++ +++ match(Set res (CompareAndSwapP mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4); +++ +++ format %{ +++ "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t" +++ "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapP" +++ %} +++ +++ ins_encode(riscv_enc_cmpxchg(res, mem, oldval, newval)); +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval) +++%{ +++ match(Set res (CompareAndSwapN mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + ALU_COST * 8 + BRANCH_COST * 4); +++ +++ format %{ +++ "cmpxchg $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t" +++ "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapN" +++ %} +++ +++ ins_encode(riscv_enc_cmpxchgn(res, mem, oldval, newval)); +++ +++ ins_pipe(pipe_slow); +++%} +++ +++// alternative CompareAndSwapX when we are eliding barriers +++instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, +++ iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (CompareAndSwapB mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + ALU_COST * 10 + BRANCH_COST * 4); +++ +++ effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); +++ +++ format %{ +++ "cmpxchg_acq $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t" +++ "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapBAcq" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8, +++ Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register, +++ true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, +++ iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (CompareAndSwapS mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + ALU_COST * 11 + BRANCH_COST * 4); +++ +++ effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); +++ +++ format %{ +++ "cmpxchg_acq $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t" +++ "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 
1 : 0), #@compareAndSwapSAcq" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16, +++ Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register, +++ true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (CompareAndSwapI mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4); +++ +++ format %{ +++ "cmpxchg_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t" +++ "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapIAcq" +++ %} +++ +++ ins_encode(riscv_enc_cmpxchgw_acq(res, mem, oldval, newval)); +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (CompareAndSwapL mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4); +++ +++ format %{ +++ "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t" +++ "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapLAcq" +++ %} +++ +++ ins_encode(riscv_enc_cmpxchg_acq(res, mem, oldval, newval)); +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval) +++%{ +++ predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0)); +++ +++ match(Set res (CompareAndSwapP mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4); +++ +++ format %{ +++ "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t" +++ "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapPAcq" +++ %} +++ +++ ins_encode(riscv_enc_cmpxchg_acq(res, mem, oldval, newval)); +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (CompareAndSwapN mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + ALU_COST * 8 + BRANCH_COST * 4); +++ +++ format %{ +++ "cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t" +++ "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapNAcq" +++ %} +++ +++ ins_encode(riscv_enc_cmpxchgn_acq(res, mem, oldval, newval)); +++ +++ ins_pipe(pipe_slow); +++%} +++ +++// Sundry CAS operations. Note that release is always true, +++// regardless of the memory ordering of the CAS. This is because we +++// need the volatile case to be sequentially consistent but there is +++// no trailing StoreLoad barrier emitted by C2. Unfortunately we +++// can't check the type of memory ordering here, so we always emit a +++// sc_d(w) with rl bit set. 
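+++//
+++// Summarising the ins_encode bodies below (an illustrative recap, not
+++// additional generated code): the plain rules pass
+++//
+++//   /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl
+++//
+++// to cmpxchg, while the *Acq rules selected by
+++// needs_acquiring_load_reserved(n) pass
+++//
+++//   /*acquire*/ Assembler::aq, /*release*/ Assembler::rl
+++//
+++// i.e. only the acquire bit varies, and the release bit is always set,
+++// as the note above explains.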
+++instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, +++ iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) +++%{ +++ match(Set res (CompareAndExchangeB mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 5); +++ +++ effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); +++ +++ format %{ +++ "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeB" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register, +++ /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, +++ iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) +++%{ +++ match(Set res (CompareAndExchangeS mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 6); +++ +++ effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); +++ +++ format %{ +++ "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeS" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register, +++ /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval) +++%{ +++ match(Set res (CompareAndExchangeI mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST); +++ +++ effect(TEMP_DEF res); +++ +++ format %{ +++ "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeI" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval) +++%{ +++ match(Set res (CompareAndExchangeL mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST); +++ +++ effect(TEMP_DEF res); +++ +++ format %{ +++ "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeL" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval) +++%{ +++ match(Set res (CompareAndExchangeN mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 3); +++ +++ effect(TEMP_DEF res); +++ +++ format %{ +++ "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeN" +++ %} +++ 
+++ ins_encode %{ +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) +++%{ +++ predicate(n->as_LoadStore()->barrier_data() == 0); +++ match(Set res (CompareAndExchangeP mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST); +++ +++ effect(TEMP_DEF res); +++ +++ format %{ +++ "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeP" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, +++ iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (CompareAndExchangeB mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 5); +++ +++ effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); +++ +++ format %{ +++ "cmpxchg_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeBAcq" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register, +++ /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, +++ iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (CompareAndExchangeS mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 6); +++ +++ effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); +++ +++ format %{ +++ "cmpxchg_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeSAcq" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register, +++ /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (CompareAndExchangeI mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST); +++ +++ effect(TEMP_DEF res); +++ +++ format %{ +++ "cmpxchg_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeIAcq" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register); +++ 
%} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (CompareAndExchangeL mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST); +++ +++ effect(TEMP_DEF res); +++ +++ format %{ +++ "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeLAcq" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (CompareAndExchangeN mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST); +++ +++ effect(TEMP_DEF res); +++ +++ format %{ +++ "cmpxchg_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeNAcq" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) +++%{ +++ predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0)); +++ +++ match(Set res (CompareAndExchangeP mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST); +++ +++ effect(TEMP_DEF res); +++ +++ format %{ +++ "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangePAcq" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, +++ iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) +++%{ +++ match(Set res (WeakCompareAndSwapB mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 6); +++ +++ effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); +++ +++ format %{ +++ "cmpxchg_weak $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t" +++ "# $res == 1 when success, #@weakCompareAndSwapB" +++ %} +++ +++ ins_encode %{ +++ __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register, +++ $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, +++ iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) +++%{ +++ match(Set res (WeakCompareAndSwapS mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 7); +++ +++ effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, 
TEMP tmp2, TEMP tmp3); +++ +++ format %{ +++ "cmpxchg_weak $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t" +++ "# $res == 1 when success, #@weakCompareAndSwapS" +++ %} +++ +++ ins_encode %{ +++ __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register, +++ $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval) +++%{ +++ match(Set res (WeakCompareAndSwapI mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2); +++ +++ format %{ +++ "cmpxchg_weak $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t" +++ "# $res == 1 when success, #@weakCompareAndSwapI" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval) +++%{ +++ match(Set res (WeakCompareAndSwapL mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2); +++ +++ format %{ +++ "cmpxchg_weak $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t" +++ "# $res == 1 when success, #@weakCompareAndSwapL" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval) +++%{ +++ match(Set res (WeakCompareAndSwapN mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 4); +++ +++ format %{ +++ "cmpxchg_weak $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t" +++ "# $res == 1 when success, #@weakCompareAndSwapN" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval) +++%{ +++ predicate(n->as_LoadStore()->barrier_data() == 0); +++ match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2); +++ +++ format %{ +++ "cmpxchg_weak $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t" +++ "# $res == 1 when success, #@weakCompareAndSwapP" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64, +++ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, +++ iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (WeakCompareAndSwapB mem (Binary oldval 
newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 6); +++ +++ effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); +++ +++ format %{ +++ "cmpxchg_weak_acq $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t" +++ "# $res == 1 when success, #@weakCompareAndSwapBAcq" +++ %} +++ +++ ins_encode %{ +++ __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register, +++ $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, +++ iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (WeakCompareAndSwapS mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 7); +++ +++ effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); +++ +++ format %{ +++ "cmpxchg_weak_acq $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t" +++ "# $res == 1 when success, #@weakCompareAndSwapSAcq" +++ %} +++ +++ ins_encode %{ +++ __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register, +++ $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (WeakCompareAndSwapI mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2); +++ +++ format %{ +++ "cmpxchg_weak_acq $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t" +++ "# $res == 1 when success, #@weakCompareAndSwapIAcq" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (WeakCompareAndSwapL mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2); +++ +++ format %{ +++ "cmpxchg_weak_acq $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t" +++ "# $res == 1 when success, #@weakCompareAndSwapLAcq" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set res (WeakCompareAndSwapN mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 4); +++ +++ format %{ +++ "cmpxchg_weak_acq $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t" +++ 
"# $res == 1 when success, #@weakCompareAndSwapNAcq" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval) +++%{ +++ predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0)); +++ +++ match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); +++ +++ ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2); +++ +++ format %{ +++ "cmpxchg_weak_acq $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t" +++ "\t# $res == 1 when success, #@weakCompareAndSwapPAcq" +++ %} +++ +++ ins_encode %{ +++ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64, +++ /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) +++%{ +++ match(Set prev (GetAndSetI mem newv)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "atomic_xchgw $prev, $newv, [$mem]\t#@get_and_setI" %} +++ +++ ins_encode %{ +++ __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) +++%{ +++ match(Set prev (GetAndSetL mem newv)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "atomic_xchg $prev, $newv, [$mem]\t#@get_and_setL" %} +++ +++ ins_encode %{ +++ __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) +++%{ +++ match(Set prev (GetAndSetN mem newv)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "atomic_xchgwu $prev, $newv, [$mem]\t#@get_and_setN" %} +++ +++ ins_encode %{ +++ __ atomic_xchgwu($prev$$Register, $newv$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) +++%{ +++ predicate(n->as_LoadStore()->barrier_data() == 0); +++ match(Set prev (GetAndSetP mem newv)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "atomic_xchg $prev, $newv, [$mem]\t#@get_and_setP" %} +++ +++ ins_encode %{ +++ __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set prev (GetAndSetI mem newv)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "atomic_xchgw_acq $prev, $newv, [$mem]\t#@get_and_setIAcq" %} +++ +++ ins_encode %{ +++ __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set prev (GetAndSetL mem newv)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "atomic_xchg_acq $prev, $newv, [$mem]\t#@get_and_setLAcq" %} +++ +++ ins_encode %{ +++ __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) +++%{ +++ 
predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set prev (GetAndSetN mem newv)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "atomic_xchgwu_acq $prev, $newv, [$mem]\t#@get_and_setNAcq" %} +++ +++ ins_encode %{ +++ __ atomic_xchgalwu($prev$$Register, $newv$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) +++%{ +++ predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0)); +++ +++ match(Set prev (GetAndSetP mem newv)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "atomic_xchg_acq $prev, $newv, [$mem]\t#@get_and_setPAcq" %} +++ +++ ins_encode %{ +++ __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) +++%{ +++ match(Set newval (GetAndAddL mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addL $newval, [$mem], $incr\t#@get_and_addL" %} +++ +++ ins_encode %{ +++ __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) +++%{ +++ predicate(n->as_LoadStore()->result_not_used()); +++ +++ match(Set dummy (GetAndAddL mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addL [$mem], $incr\t#@get_and_addL_no_res" %} +++ +++ ins_encode %{ +++ __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAdd incr) +++%{ +++ match(Set newval (GetAndAddL mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addL $newval, [$mem], $incr\t#@get_and_addLi" %} +++ +++ ins_encode %{ +++ __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAdd incr) +++%{ +++ predicate(n->as_LoadStore()->result_not_used()); +++ +++ match(Set dummy (GetAndAddL mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addL [$mem], $incr\t#@get_and_addLi_no_res" %} +++ +++ ins_encode %{ +++ __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) +++%{ +++ match(Set newval (GetAndAddI mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addI $newval, [$mem], $incr\t#@get_and_addI" %} +++ +++ ins_encode %{ +++ __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) +++%{ +++ predicate(n->as_LoadStore()->result_not_used()); +++ +++ match(Set dummy (GetAndAddI mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addI [$mem], $incr\t#@get_and_addI_no_res" %} +++ +++ ins_encode %{ +++ __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAdd incr) +++%{ +++ match(Set newval (GetAndAddI mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addI $newval, [$mem], $incr\t#@get_and_addIi" %} +++ +++ ins_encode %{ +++ __ atomic_addw($newval$$Register, $incr$$constant, 
as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAdd incr) +++%{ +++ predicate(n->as_LoadStore()->result_not_used()); +++ +++ match(Set dummy (GetAndAddI mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addI [$mem], $incr\t#@get_and_addIi_no_res" %} +++ +++ ins_encode %{ +++ __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set newval (GetAndAddL mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addL_acq $newval, [$mem], $incr\t#@get_and_addLAcq" %} +++ +++ ins_encode %{ +++ __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{ +++ predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n)); +++ +++ match(Set dummy (GetAndAddL mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addL_acq [$mem], $incr\t#@get_and_addL_no_resAcq" %} +++ +++ ins_encode %{ +++ __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAdd incr) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set newval (GetAndAddL mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addL_acq $newval, [$mem], $incr\t#@get_and_addLiAcq" %} +++ +++ ins_encode %{ +++ __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAdd incr) +++%{ +++ predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n)); +++ +++ match(Set dummy (GetAndAddL mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addL_acq [$mem], $incr\t#@get_and_addLi_no_resAcq" %} +++ +++ ins_encode %{ +++ __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set newval (GetAndAddI mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addI_acq $newval, [$mem], $incr\t#@get_and_addIAcq" %} +++ +++ ins_encode %{ +++ __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) +++%{ +++ predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n)); +++ +++ match(Set dummy (GetAndAddI mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addI_acq [$mem], $incr\t#@get_and_addI_no_resAcq" %} +++ +++ ins_encode %{ +++ __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAdd incr) +++%{ +++ predicate(needs_acquiring_load_reserved(n)); +++ +++ match(Set newval (GetAndAddI mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addI_acq $newval, [$mem], $incr\t#@get_and_addIiAcq" %} +++ +++ ins_encode %{ +++ __ 
atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAdd incr) +++%{ +++ predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n)); +++ +++ match(Set dummy (GetAndAddI mem incr)); +++ +++ ins_cost(ALU_COST); +++ +++ format %{ "get_and_addI_acq [$mem], $incr\t#@get_and_addIi_no_resAcq" %} +++ +++ ins_encode %{ +++ __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base)); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++// ============================================================================ +++// Arithmetic Instructions +++// +++ +++// Integer Addition +++ +++// TODO +++// these currently employ operations which do not set CR and hence are +++// not flagged as killing CR but we would like to isolate the cases +++// where we want to set flags from those where we don't. need to work +++// out how to do that. +++instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ +++ match(Set dst (AddI src1 src2)); +++ +++ ins_cost(ALU_COST); +++ format %{ "addw $dst, $src1, $src2\t#@addI_reg_reg" %} +++ +++ ins_encode %{ +++ __ addw(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAdd src2) %{ +++ match(Set dst (AddI src1 src2)); +++ +++ ins_cost(ALU_COST); +++ format %{ "addiw $dst, $src1, $src2\t#@addI_reg_imm" %} +++ +++ ins_encode %{ +++ int32_t con = (int32_t)$src2$$constant; +++ __ addiw(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ $src2$$constant); +++ %} +++ +++ ins_pipe(ialu_reg_imm); +++%} +++ +++instruct addI_reg_imm_l2i(iRegINoSp dst, iRegL src1, immIAdd src2) %{ +++ match(Set dst (AddI (ConvL2I src1) src2)); +++ +++ ins_cost(ALU_COST); +++ format %{ "addiw $dst, $src1, $src2\t#@addI_reg_imm_l2i" %} +++ +++ ins_encode %{ +++ __ addiw(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ $src2$$constant); +++ %} +++ +++ ins_pipe(ialu_reg_imm); +++%} +++ +++// Pointer Addition +++instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{ +++ match(Set dst (AddP src1 src2)); +++ +++ ins_cost(ALU_COST); +++ format %{ "add $dst, $src1, $src2\t# ptr, #@addP_reg_reg" %} +++ +++ ins_encode %{ +++ __ add(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// If we shift more than 32 bits, we need not convert I2L. +++instruct lShiftL_regI_immGE32(iRegLNoSp dst, iRegI src, uimmI6_ge32 scale) %{ +++ match(Set dst (LShiftL (ConvI2L src) scale)); +++ ins_cost(ALU_COST); +++ format %{ "slli $dst, $src, $scale & 63\t#@lShiftL_regI_immGE32" %} +++ +++ ins_encode %{ +++ __ slli(as_Register($dst$$reg), as_Register($src$$reg), $scale$$constant & 63); +++ %} +++ +++ ins_pipe(ialu_reg_shift); +++%} +++ +++// Pointer Immediate Addition +++// n.b. 
this needs to be more expensive than using an indirect memory
+++// operand
+++instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAdd src2) %{
+++  match(Set dst (AddP src1 src2));
+++  ins_cost(ALU_COST);
+++  format %{ "addi $dst, $src1, $src2\t# ptr, #@addP_reg_imm" %}
+++
+++  ins_encode %{
+++    // src2 is an immediate, so this actually emits addi
+++    __ add(as_Register($dst$$reg),
+++           as_Register($src1$$reg),
+++           $src2$$constant);
+++  %}
+++
+++  ins_pipe(ialu_reg_imm);
+++%}
+++
+++// Long Addition
+++instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
+++  match(Set dst (AddL src1 src2));
+++  ins_cost(ALU_COST);
+++  format %{ "add $dst, $src1, $src2\t#@addL_reg_reg" %}
+++
+++  ins_encode %{
+++    __ add(as_Register($dst$$reg),
+++           as_Register($src1$$reg),
+++           as_Register($src2$$reg));
+++  %}
+++
+++  ins_pipe(ialu_reg_reg);
+++%}
+++
+++// Long Immediate Addition. No constant pool entries required.
+++instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
+++  match(Set dst (AddL src1 src2));
+++  ins_cost(ALU_COST);
+++  format %{ "addi $dst, $src1, $src2\t#@addL_reg_imm" %}
+++
+++  ins_encode %{
+++    // src2 is an immediate, so this actually emits addi
+++    __ add(as_Register($dst$$reg),
+++           as_Register($src1$$reg),
+++           $src2$$constant);
+++  %}
+++
+++  ins_pipe(ialu_reg_imm);
+++%}
+++
+++// Integer Subtraction
+++instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
+++  match(Set dst (SubI src1 src2));
+++
+++  ins_cost(ALU_COST);
+++  format %{ "subw $dst, $src1, $src2\t#@subI_reg_reg" %}
+++
+++  ins_encode %{
+++    __ subw(as_Register($dst$$reg),
+++            as_Register($src1$$reg),
+++            as_Register($src2$$reg));
+++  %}
+++
+++  ins_pipe(ialu_reg_reg);
+++%}
+++
+++// Immediate Subtraction
+++instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immISub src2) %{
+++  match(Set dst (SubI src1 src2));
+++
+++  ins_cost(ALU_COST);
+++  format %{ "addiw $dst, $src1, -$src2\t#@subI_reg_imm" %}
+++
+++  ins_encode %{
+++    // src2 is an immediate, so this actually emits addiw with -src2
+++    __ subw(as_Register($dst$$reg),
+++            as_Register($src1$$reg),
+++            $src2$$constant);
+++  %}
+++
+++  ins_pipe(ialu_reg_imm);
+++%}
+++
+++// Long Subtraction
+++instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
+++  match(Set dst (SubL src1 src2));
+++  ins_cost(ALU_COST);
+++  format %{ "sub $dst, $src1, $src2\t#@subL_reg_reg" %}
+++
+++  ins_encode %{
+++    __ sub(as_Register($dst$$reg),
+++           as_Register($src1$$reg),
+++           as_Register($src2$$reg));
+++  %}
+++
+++  ins_pipe(ialu_reg_reg);
+++%}
+++
+++// Long Immediate Subtraction. No constant pool entries required.
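+++//
+++// As an illustration of the immediate forms above and below (a sketch
+++// assuming the immLAdd/immISub/immLSub operands only match constants
+++// whose value -- negated, for the sub forms -- fits the 12-bit signed
+++// I-immediate):
+++//
+++//   x + 100    =>  addi x, x, 100      # matches addL_reg_imm
+++//   x - 100    =>  addi x, x, -100     # matches subL_reg_imm below
+++//   x - 65536  =>  li   t0, 65536      # immediate out of range:
+++//                  sub  x, x, t0       # falls back to subL_reg_reg
+++//
+++// so the out-of-range case materialises the constant in a register
+++// rather than taking a constant pool entry.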
+++instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLSub src2) %{ +++ match(Set dst (SubL src1 src2)); +++ ins_cost(ALU_COST); +++ format %{ "addi $dst, $src1, -$src2\t#@subL_reg_imm" %} +++ +++ ins_encode %{ +++ // src2 is imm, so actually call the addi +++ __ sub(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ $src2$$constant); +++ %} +++ +++ ins_pipe(ialu_reg_imm); +++%} +++ +++// Integer Negation (special case for sub) +++ +++instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero) %{ +++ match(Set dst (SubI zero src)); +++ ins_cost(ALU_COST); +++ format %{ "subw $dst, x0, $src\t# int, #@negI_reg" %} +++ +++ ins_encode %{ +++ // actually call the subw +++ __ negw(as_Register($dst$$reg), +++ as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// Long Negation +++ +++instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero) %{ +++ match(Set dst (SubL zero src)); +++ ins_cost(ALU_COST); +++ format %{ "sub $dst, x0, $src\t# long, #@negL_reg" %} +++ +++ ins_encode %{ +++ // actually call the sub +++ __ neg(as_Register($dst$$reg), +++ as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// Integer Multiply +++ +++instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ +++ match(Set dst (MulI src1 src2)); +++ ins_cost(IMUL_COST); +++ format %{ "mulw $dst, $src1, $src2\t#@mulI" %} +++ +++ //this means 2 word multi, and no sign extend to 64 bits +++ ins_encode %{ +++ // riscv64 mulw will sign-extension to high 32 bits in dst reg +++ __ mulw(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(imul_reg_reg); +++%} +++ +++// Long Multiply +++ +++instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{ +++ match(Set dst (MulL src1 src2)); +++ ins_cost(IMUL_COST); +++ format %{ "mul $dst, $src1, $src2\t#@mulL" %} +++ +++ ins_encode %{ +++ __ mul(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(lmul_reg_reg); +++%} +++ +++instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2) +++%{ +++ match(Set dst (MulHiL src1 src2)); +++ ins_cost(IMUL_COST); +++ format %{ "mulh $dst, $src1, $src2\t# mulhi, #@mulHiL_rReg" %} +++ +++ ins_encode %{ +++ __ mulh(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(lmul_reg_reg); +++%} +++ +++// Integer Divide +++ +++instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ +++ match(Set dst (DivI src1 src2)); +++ ins_cost(IDIVSI_COST); +++ format %{ "divw $dst, $src1, $src2\t#@divI"%} +++ +++ ins_encode(riscv_enc_divw(dst, src1, src2)); +++ ins_pipe(idiv_reg_reg); +++%} +++ +++instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{ +++ match(Set dst (URShiftI (RShiftI src1 div1) div2)); +++ ins_cost(ALU_COST); +++ format %{ "srliw $dst, $src1, $div1\t# int signExtract, #@signExtract" %} +++ +++ ins_encode %{ +++ __ srliw(as_Register($dst$$reg), as_Register($src1$$reg), 31); +++ %} +++ ins_pipe(ialu_reg_shift); +++%} +++ +++// Long Divide +++ +++instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{ +++ match(Set dst (DivL src1 src2)); +++ ins_cost(IDIVDI_COST); +++ format %{ "div $dst, $src1, $src2\t#@divL" %} +++ +++ ins_encode(riscv_enc_div(dst, src1, src2)); +++ ins_pipe(ldiv_reg_reg); +++%} +++ +++instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{ +++ match(Set dst (URShiftL (RShiftL src1 div1) div2)); +++ ins_cost(ALU_COST); +++ format %{ "srli $dst, $src1, $div1\t# 
long signExtract, #@signExtractL" %} +++ +++ ins_encode %{ +++ __ srli(as_Register($dst$$reg), as_Register($src1$$reg), 63); +++ %} +++ ins_pipe(ialu_reg_shift); +++%} +++ +++// Integer Remainder +++ +++instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ +++ match(Set dst (ModI src1 src2)); +++ ins_cost(IDIVSI_COST); +++ format %{ "remw $dst, $src1, $src2\t#@modI" %} +++ +++ ins_encode(riscv_enc_modw(dst, src1, src2)); +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// Long Remainder +++ +++instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{ +++ match(Set dst (ModL src1 src2)); +++ ins_cost(IDIVDI_COST); +++ format %{ "rem $dst, $src1, $src2\t#@modL" %} +++ +++ ins_encode(riscv_enc_mod(dst, src1, src2)); +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// Integer Shifts +++ +++// Shift Left Register +++// In RV64I, only the low 5 bits of src2 are considered for the shift amount +++instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ +++ match(Set dst (LShiftI src1 src2)); +++ ins_cost(ALU_COST); +++ format %{ "sllw $dst, $src1, $src2\t#@lShiftI_reg_reg" %} +++ +++ ins_encode %{ +++ __ sllw(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg_vshift); +++%} +++ +++// Shift Left Immediate +++instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{ +++ match(Set dst (LShiftI src1 src2)); +++ ins_cost(ALU_COST); +++ format %{ "slliw $dst, $src1, ($src2 & 0x1f)\t#@lShiftI_reg_imm" %} +++ +++ ins_encode %{ +++ // the shift amount is encoded in the lower +++ // 5 bits of the I-immediate field for RV32I +++ __ slliw(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ (unsigned) $src2$$constant & 0x1f); +++ %} +++ +++ ins_pipe(ialu_reg_shift); +++%} +++ +++// Shift Right Logical Register +++// In RV64I, only the low 5 bits of src2 are considered for the shift amount +++instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ +++ match(Set dst (URShiftI src1 src2)); +++ ins_cost(ALU_COST); +++ format %{ "srlw $dst, $src1, $src2\t#@urShiftI_reg_reg" %} +++ +++ ins_encode %{ +++ __ srlw(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg_vshift); +++%} +++ +++// Shift Right Logical Immediate +++instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{ +++ match(Set dst (URShiftI src1 src2)); +++ ins_cost(ALU_COST); +++ format %{ "srliw $dst, $src1, ($src2 & 0x1f)\t#@urShiftI_reg_imm" %} +++ +++ ins_encode %{ +++ // the shift amount is encoded in the lower +++ // 6 bits of the I-immediate field for RV64I +++ __ srliw(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ (unsigned) $src2$$constant & 0x1f); +++ %} +++ +++ ins_pipe(ialu_reg_shift); +++%} +++ +++// Shift Right Arithmetic Register +++// In RV64I, only the low 5 bits of src2 are considered for the shift amount +++instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ +++ match(Set dst (RShiftI src1 src2)); +++ ins_cost(ALU_COST); +++ format %{ "sraw $dst, $src1, $src2\t#@rShiftI_reg_reg" %} +++ +++ ins_encode %{ +++ // riscv will sign-ext dst high 32 bits +++ __ sraw(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg_vshift); +++%} +++ +++// Shift Right Arithmetic Immediate +++instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{ +++ match(Set dst (RShiftI src1 src2)); +++ ins_cost(ALU_COST); +++ format %{ "sraiw $dst, 
$src1, ($src2 & 0x1f)\t#@rShiftI_reg_imm" %} +++ +++ ins_encode %{ +++ // riscv will sign-ext dst high 32 bits +++ __ sraiw(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ (unsigned) $src2$$constant & 0x1f); +++ %} +++ +++ ins_pipe(ialu_reg_shift); +++%} +++ +++// Long Shifts +++ +++// Shift Left Register +++// In RV64I, only the low 6 bits of src2 are considered for the shift amount +++instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{ +++ match(Set dst (LShiftL src1 src2)); +++ +++ ins_cost(ALU_COST); +++ format %{ "sll $dst, $src1, $src2\t#@lShiftL_reg_reg" %} +++ +++ ins_encode %{ +++ __ sll(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg_vshift); +++%} +++ +++// Shift Left Immediate +++instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{ +++ match(Set dst (LShiftL src1 src2)); +++ +++ ins_cost(ALU_COST); +++ format %{ "slli $dst, $src1, ($src2 & 0x3f)\t#@lShiftL_reg_imm" %} +++ +++ ins_encode %{ +++ // the shift amount is encoded in the lower +++ // 6 bits of the I-immediate field for RV64I +++ __ slli(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ (unsigned) $src2$$constant & 0x3f); +++ %} +++ +++ ins_pipe(ialu_reg_shift); +++%} +++ +++// Shift Right Logical Register +++// In RV64I, only the low 6 bits of src2 are considered for the shift amount +++instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{ +++ match(Set dst (URShiftL src1 src2)); +++ +++ ins_cost(ALU_COST); +++ format %{ "srl $dst, $src1, $src2\t#@urShiftL_reg_reg" %} +++ +++ ins_encode %{ +++ __ srl(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg_vshift); +++%} +++ +++// Shift Right Logical Immediate +++instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{ +++ match(Set dst (URShiftL src1 src2)); +++ +++ ins_cost(ALU_COST); +++ format %{ "srli $dst, $src1, ($src2 & 0x3f)\t#@urShiftL_reg_imm" %} +++ +++ ins_encode %{ +++ // the shift amount is encoded in the lower +++ // 6 bits of the I-immediate field for RV64I +++ __ srli(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ (unsigned) $src2$$constant & 0x3f); +++ %} +++ +++ ins_pipe(ialu_reg_shift); +++%} +++ +++// A special-case pattern for card table stores. 
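+++// As a sketch of the idiom this serves: the card-marking write barrier
+++// derives a card index by shifting the raw object address. In C++ terms
+++// (a CARD_SHIFT of 9, i.e. 512-byte cards, is an assumed value here, not
+++// one taken from this port):
+++//
+++//   static inline size_t card_index(const void* addr) {
+++//     return reinterpret_cast<uintptr_t>(addr) >> CARD_SHIFT;
+++//   }
+++//
+++// which is exactly the (URShiftL (CastP2X src1) src2) shape matched below.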
+++instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{ +++ match(Set dst (URShiftL (CastP2X src1) src2)); +++ +++ ins_cost(ALU_COST); +++ format %{ "srli $dst, p2x($src1), ($src2 & 0x3f)\t#@urShiftP_reg_imm" %} +++ +++ ins_encode %{ +++ // the shift amount is encoded in the lower +++ // 6 bits of the I-immediate field for RV64I +++ __ srli(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ (unsigned) $src2$$constant & 0x3f); +++ %} +++ +++ ins_pipe(ialu_reg_shift); +++%} +++ +++// Shift Right Arithmetic Register +++// In RV64I, only the low 6 bits of src2 are considered for the shift amount +++instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{ +++ match(Set dst (RShiftL src1 src2)); +++ +++ ins_cost(ALU_COST); +++ format %{ "sra $dst, $src1, $src2\t#@rShiftL_reg_reg" %} +++ +++ ins_encode %{ +++ __ sra(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg_vshift); +++%} +++ +++// Shift Right Arithmetic Immediate +++instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{ +++ match(Set dst (RShiftL src1 src2)); +++ +++ ins_cost(ALU_COST); +++ format %{ "srai $dst, $src1, ($src2 & 0x3f)\t#@rShiftL_reg_imm" %} +++ +++ ins_encode %{ +++ // the shift amount is encoded in the lower +++ // 6 bits of the I-immediate field for RV64I +++ __ srai(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ (unsigned) $src2$$constant & 0x3f); +++ %} +++ +++ ins_pipe(ialu_reg_shift); +++%} +++ +++instruct regI_not_reg(iRegINoSp dst, iRegI src1, immI_M1 m1) %{ +++ match(Set dst (XorI src1 m1)); +++ ins_cost(ALU_COST); +++ format %{ "xori $dst, $src1, -1\t#@regI_not_reg" %} +++ +++ ins_encode %{ +++ __ xori(as_Register($dst$$reg), as_Register($src1$$reg), -1); +++ %} +++ +++ ins_pipe(ialu_reg_imm); +++%} +++ +++instruct regL_not_reg(iRegLNoSp dst, iRegL src1, immL_M1 m1) %{ +++ match(Set dst (XorL src1 m1)); +++ ins_cost(ALU_COST); +++ format %{ "xori $dst, $src1, -1\t#@regL_not_reg" %} +++ +++ ins_encode %{ +++ __ xori(as_Register($dst$$reg), as_Register($src1$$reg), -1); +++ %} +++ +++ ins_pipe(ialu_reg_imm); +++%} +++ +++ +++// ============================================================================ +++// Floating Point Arithmetic Instructions +++ +++instruct addF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{ +++ match(Set dst (AddF src1 src2)); +++ +++ ins_cost(DEFAULT_COST * 5); +++ format %{ "fadd.s $dst, $src1, $src2\t#@addF_reg_reg" %} +++ +++ ins_encode %{ +++ __ fadd_s(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg)); +++ %} +++ +++ ins_pipe(fp_dop_reg_reg_s); +++%} +++ +++instruct addD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{ +++ match(Set dst (AddD src1 src2)); +++ +++ ins_cost(DEFAULT_COST * 5); +++ format %{ "fadd.d $dst, $src1, $src2\t#@addD_reg_reg" %} +++ +++ ins_encode %{ +++ __ fadd_d(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg)); +++ %} +++ +++ ins_pipe(fp_dop_reg_reg_d); +++%} +++ +++instruct subF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{ +++ match(Set dst (SubF src1 src2)); +++ +++ ins_cost(DEFAULT_COST * 5); +++ format %{ "fsub.s $dst, $src1, $src2\t#@subF_reg_reg" %} +++ +++ ins_encode %{ +++ __ fsub_s(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg)); +++ %} +++ +++ ins_pipe(fp_dop_reg_reg_s); +++%} +++ +++instruct subD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{ +++ match(Set dst (SubD src1 src2)); +++ +++ 
ins_cost(DEFAULT_COST * 5); +++ format %{ "fsub.d $dst, $src1, $src2\t#@subD_reg_reg" %} +++ +++ ins_encode %{ +++ __ fsub_d(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg)); +++ %} +++ +++ ins_pipe(fp_dop_reg_reg_d); +++%} +++ +++instruct mulF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{ +++ match(Set dst (MulF src1 src2)); +++ +++ ins_cost(FMUL_SINGLE_COST); +++ format %{ "fmul.s $dst, $src1, $src2\t#@mulF_reg_reg" %} +++ +++ ins_encode %{ +++ __ fmul_s(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg)); +++ %} +++ +++ ins_pipe(fp_dop_reg_reg_s); +++%} +++ +++instruct mulD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{ +++ match(Set dst (MulD src1 src2)); +++ +++ ins_cost(FMUL_DOUBLE_COST); +++ format %{ "fmul.d $dst, $src1, $src2\t#@mulD_reg_reg" %} +++ +++ ins_encode %{ +++ __ fmul_d(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg)); +++ %} +++ +++ ins_pipe(fp_dop_reg_reg_d); +++%} +++ +++// src1 * src2 + src3 +++instruct maddF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{ +++ predicate(UseFMA); +++ match(Set dst (FmaF src3 (Binary src1 src2))); +++ +++ ins_cost(FMUL_SINGLE_COST); +++ format %{ "fmadd.s $dst, $src1, $src2, $src3\t#@maddF_reg_reg" %} +++ +++ ins_encode %{ +++ __ fmadd_s(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg), +++ as_FloatRegister($src3$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++// src1 * src2 + src3 +++instruct maddD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{ +++ predicate(UseFMA); +++ match(Set dst (FmaD src3 (Binary src1 src2))); +++ +++ ins_cost(FMUL_DOUBLE_COST); +++ format %{ "fmadd.d $dst, $src1, $src2, $src3\t#@maddD_reg_reg" %} +++ +++ ins_encode %{ +++ __ fmadd_d(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg), +++ as_FloatRegister($src3$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++// src1 * src2 - src3 +++instruct msubF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{ +++ predicate(UseFMA); +++ match(Set dst (FmaF (NegF src3) (Binary src1 src2))); +++ +++ ins_cost(FMUL_SINGLE_COST); +++ format %{ "fmsub.s $dst, $src1, $src2, $src3\t#@msubF_reg_reg" %} +++ +++ ins_encode %{ +++ __ fmsub_s(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg), +++ as_FloatRegister($src3$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++// src1 * src2 - src3 +++instruct msubD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{ +++ predicate(UseFMA); +++ match(Set dst (FmaD (NegD src3) (Binary src1 src2))); +++ +++ ins_cost(FMUL_DOUBLE_COST); +++ format %{ "fmsub.d $dst, $src1, $src2, $src3\t#@msubD_reg_reg" %} +++ +++ ins_encode %{ +++ __ fmsub_d(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg), +++ as_FloatRegister($src3$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++// -src1 * src2 + src3 +++instruct nmsubF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{ +++ predicate(UseFMA); +++ match(Set dst (FmaF src3 (Binary (NegF src1) src2))); +++ match(Set dst (FmaF src3 (Binary src1 (NegF src2)))); +++ +++ ins_cost(FMUL_SINGLE_COST); +++ format %{ "fnmsub.s $dst, $src1, $src2, $src3\t#@nmsubF_reg_reg" %} +++ +++ ins_encode %{ +++ __ fnmsub_s(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ 
as_FloatRegister($src2$$reg), +++ as_FloatRegister($src3$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++// -src1 * src2 + src3 +++instruct nmsubD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{ +++ predicate(UseFMA); +++ match(Set dst (FmaD src3 (Binary (NegD src1) src2))); +++ match(Set dst (FmaD src3 (Binary src1 (NegD src2)))); +++ +++ ins_cost(FMUL_DOUBLE_COST); +++ format %{ "fnmsub.d $dst, $src1, $src2, $src3\t#@nmsubD_reg_reg" %} +++ +++ ins_encode %{ +++ __ fnmsub_d(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg), +++ as_FloatRegister($src3$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++// -src1 * src2 - src3 +++instruct nmaddF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{ +++ predicate(UseFMA); +++ match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2))); +++ match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2)))); +++ +++ ins_cost(FMUL_SINGLE_COST); +++ format %{ "fnmadd.s $dst, $src1, $src2, $src3\t#@nmaddF_reg_reg" %} +++ +++ ins_encode %{ +++ __ fnmadd_s(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg), +++ as_FloatRegister($src3$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++// -src1 * src2 - src3 +++instruct nmaddD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{ +++ predicate(UseFMA); +++ match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2))); +++ match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2)))); +++ +++ ins_cost(FMUL_DOUBLE_COST); +++ format %{ "fnmadd.d $dst, $src1, $src2, $src3\t#@nmaddD_reg_reg" %} +++ +++ ins_encode %{ +++ __ fnmadd_d(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg), +++ as_FloatRegister($src3$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++// Math.max(FF)F +++instruct maxF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{ +++ match(Set dst (MaxF src1 src2)); +++ effect(TEMP_DEF dst, KILL cr); +++ +++ format %{ "maxF $dst, $src1, $src2" %} +++ +++ ins_encode %{ +++ __ minmax_FD(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg), +++ false /* is_double */, false /* is_min */); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++// Math.min(FF)F +++instruct minF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{ +++ match(Set dst (MinF src1 src2)); +++ effect(TEMP_DEF dst, KILL cr); +++ +++ format %{ "minF $dst, $src1, $src2" %} +++ +++ ins_encode %{ +++ __ minmax_FD(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg), +++ false /* is_double */, true /* is_min */); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++// Math.max(DD)D +++instruct maxD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{ +++ match(Set dst (MaxD src1 src2)); +++ effect(TEMP_DEF dst, KILL cr); +++ +++ format %{ "maxD $dst, $src1, $src2" %} +++ +++ ins_encode %{ +++ __ minmax_FD(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg), +++ true /* is_double */, false /* is_min */); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++// Math.min(DD)D +++instruct minD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{ +++ match(Set dst (MinD src1 src2)); +++ effect(TEMP_DEF dst, KILL cr); +++ +++ format %{ "minD $dst, $src1, $src2" %} +++ +++ ins_encode %{ +++ __ minmax_FD(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), 
as_FloatRegister($src2$$reg), +++ true /* is_double */, true /* is_min */); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++instruct divF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{ +++ match(Set dst (DivF src1 src2)); +++ +++ ins_cost(FDIV_COST); +++ format %{ "fdiv.s $dst, $src1, $src2\t#@divF_reg_reg" %} +++ +++ ins_encode %{ +++ __ fdiv_s(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg)); +++ %} +++ +++ ins_pipe(fp_div_s); +++%} +++ +++instruct divD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{ +++ match(Set dst (DivD src1 src2)); +++ +++ ins_cost(FDIV_COST); +++ format %{ "fdiv.d $dst, $src1, $src2\t#@divD_reg_reg" %} +++ +++ ins_encode %{ +++ __ fdiv_d(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src1$$reg), +++ as_FloatRegister($src2$$reg)); +++ %} +++ +++ ins_pipe(fp_div_d); +++%} +++ +++instruct negF_reg_reg(fRegF dst, fRegF src) %{ +++ match(Set dst (NegF src)); +++ +++ ins_cost(XFER_COST); +++ format %{ "fsgnjn.s $dst, $src, $src\t#@negF_reg_reg" %} +++ +++ ins_encode %{ +++ __ fneg_s(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src$$reg)); +++ %} +++ +++ ins_pipe(fp_uop_s); +++%} +++ +++instruct negD_reg_reg(fRegD dst, fRegD src) %{ +++ match(Set dst (NegD src)); +++ +++ ins_cost(XFER_COST); +++ format %{ "fsgnjn.d $dst, $src, $src\t#@negD_reg_reg" %} +++ +++ ins_encode %{ +++ __ fneg_d(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src$$reg)); +++ %} +++ +++ ins_pipe(fp_uop_d); +++%} +++ +++instruct absI_reg(iRegINoSp dst, iRegIorL2I src) %{ +++ match(Set dst (AbsI src)); +++ +++ ins_cost(ALU_COST * 3); +++ format %{ +++ "sraiw t0, $src, 0x1f\n\t" +++ "addw $dst, $src, t0\n\t" +++ "xorr $dst, $dst, t0\t#@absI_reg" +++ %} +++ +++ ins_encode %{ +++ __ sraiw(t0, as_Register($src$$reg), 0x1f); +++ __ addw(as_Register($dst$$reg), as_Register($src$$reg), t0); +++ __ xorr(as_Register($dst$$reg), as_Register($dst$$reg), t0); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++instruct absL_reg(iRegLNoSp dst, iRegL src) %{ +++ match(Set dst (AbsL src)); +++ +++ ins_cost(ALU_COST * 3); +++ format %{ +++ "srai t0, $src, 0x3f\n\t" +++ "add $dst, $src, t0\n\t" +++ "xorr $dst, $dst, t0\t#@absL_reg" +++ %} +++ +++ ins_encode %{ +++ __ srai(t0, as_Register($src$$reg), 0x3f); +++ __ add(as_Register($dst$$reg), as_Register($src$$reg), t0); +++ __ xorr(as_Register($dst$$reg), as_Register($dst$$reg), t0); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++instruct absF_reg(fRegF dst, fRegF src) %{ +++ match(Set dst (AbsF src)); +++ +++ ins_cost(XFER_COST); +++ format %{ "fsgnjx.s $dst, $src, $src\t#@absF_reg" %} +++ ins_encode %{ +++ __ fabs_s(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src$$reg)); +++ %} +++ +++ ins_pipe(fp_uop_s); +++%} +++ +++instruct absD_reg(fRegD dst, fRegD src) %{ +++ match(Set dst (AbsD src)); +++ +++ ins_cost(XFER_COST); +++ format %{ "fsgnjx.d $dst, $src, $src\t#@absD_reg" %} +++ ins_encode %{ +++ __ fabs_d(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src$$reg)); +++ %} +++ +++ ins_pipe(fp_uop_d); +++%} +++ +++instruct sqrtF_reg(fRegF dst, fRegF src) %{ +++ match(Set dst (SqrtF src)); +++ +++ ins_cost(FSQRT_COST); +++ format %{ "fsqrt.s $dst, $src\t#@sqrtF_reg" %} +++ ins_encode %{ +++ __ fsqrt_s(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src$$reg)); +++ %} +++ +++ ins_pipe(fp_sqrt_s); +++%} +++ +++instruct sqrtD_reg(fRegD dst, fRegD src) %{ +++ match(Set dst (SqrtD src)); +++ +++ ins_cost(FSQRT_COST); +++ format %{ "fsqrt.d $dst, $src\t#@sqrtD_reg" %} +++ 
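+++ // fsqrt.s/fsqrt.d are correctly rounded under IEEE 754, which matches
+++ // the correctly-rounded result Java specifies for square roots, so a
+++ // single instruction suffices and no runtime stub is involved.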
ins_encode %{ +++ __ fsqrt_d(as_FloatRegister($dst$$reg), +++ as_FloatRegister($src$$reg)); +++ %} +++ +++ ins_pipe(fp_sqrt_d); +++%} +++ +++// Arithmetic Instructions End +++ +++// ============================================================================ +++// Logical Instructions +++ +++// Register And +++instruct andI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{ +++ match(Set dst (AndI src1 src2)); +++ +++ format %{ "andr $dst, $src1, $src2\t#@andI_reg_reg" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ andr(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// Immediate And +++instruct andI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{ +++ match(Set dst (AndI src1 src2)); +++ +++ format %{ "andi $dst, $src1, $src2\t#@andI_reg_imm" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ andi(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ (int32_t)($src2$$constant)); +++ %} +++ +++ ins_pipe(ialu_reg_imm); +++%} +++ +++// Register Or +++instruct orI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{ +++ match(Set dst (OrI src1 src2)); +++ +++ format %{ "orr $dst, $src1, $src2\t#@orI_reg_reg" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ orr(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// Immediate Or +++instruct orI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{ +++ match(Set dst (OrI src1 src2)); +++ +++ format %{ "ori $dst, $src1, $src2\t#@orI_reg_imm" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ ori(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ (int32_t)($src2$$constant)); +++ %} +++ +++ ins_pipe(ialu_reg_imm); +++%} +++ +++// Register Xor +++instruct xorI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{ +++ match(Set dst (XorI src1 src2)); +++ +++ format %{ "xorr $dst, $src1, $src2\t#@xorI_reg_reg" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ xorr(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// Immediate Xor +++instruct xorI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{ +++ match(Set dst (XorI src1 src2)); +++ +++ format %{ "xori $dst, $src1, $src2\t#@xorI_reg_imm" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ xori(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ (int32_t)($src2$$constant)); +++ %} +++ +++ ins_pipe(ialu_reg_imm); +++%} +++ +++// Register And Long +++instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{ +++ match(Set dst (AndL src1 src2)); +++ +++ format %{ "andr $dst, $src1, $src2\t#@andL_reg_reg" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ andr(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// Immediate And Long +++instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{ +++ match(Set dst (AndL src1 src2)); +++ +++ format %{ "andi $dst, $src1, $src2\t#@andL_reg_imm" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ andi(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ (int32_t)($src2$$constant)); +++ %} +++ +++ ins_pipe(ialu_reg_imm); +++%} +++ +++// Register Or Long +++instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{ +++ match(Set dst (OrL src1 src2)); +++ +++ format %{ "orr $dst, $src1, $src2\t#@orL_reg_reg" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ 
+++ __ orr(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// Immediate Or Long +++instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{ +++ match(Set dst (OrL src1 src2)); +++ +++ format %{ "ori $dst, $src1, $src2\t#@orL_reg_imm" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ ori(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ (int32_t)($src2$$constant)); +++ %} +++ +++ ins_pipe(ialu_reg_imm); +++%} +++ +++// Register Xor Long +++instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{ +++ match(Set dst (XorL src1 src2)); +++ +++ format %{ "xorr $dst, $src1, $src2\t#@xorL_reg_reg" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ xorr(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// Immediate Xor Long +++instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{ +++ match(Set dst (XorL src1 src2)); +++ +++ ins_cost(ALU_COST); +++ format %{ "xori $dst, $src1, $src2\t#@xorL_reg_imm" %} +++ +++ ins_encode %{ +++ __ xori(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ (int32_t)($src2$$constant)); +++ %} +++ +++ ins_pipe(ialu_reg_imm); +++%} +++ +++// ============================================================================ +++// BSWAP Instructions +++ +++instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr) %{ +++ match(Set dst (ReverseBytesI src)); +++ effect(KILL cr); +++ +++ ins_cost(ALU_COST * 13); +++ format %{ "revb_w_w $dst, $src\t#@bytes_reverse_int" %} +++ +++ ins_encode %{ +++ __ revb_w_w(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++instruct bytes_reverse_long(iRegLNoSp dst, iRegL src, rFlagsReg cr) %{ +++ match(Set dst (ReverseBytesL src)); +++ effect(KILL cr); +++ +++ ins_cost(ALU_COST * 29); +++ format %{ "revb $dst, $src\t#@bytes_reverse_long" %} +++ +++ ins_encode %{ +++ __ revb(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{ +++ match(Set dst (ReverseBytesUS src)); +++ +++ ins_cost(ALU_COST * 5); +++ format %{ "revb_h_h_u $dst, $src\t#@bytes_reverse_unsigned_short" %} +++ +++ ins_encode %{ +++ __ revb_h_h_u(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{ +++ match(Set dst (ReverseBytesS src)); +++ +++ ins_cost(ALU_COST * 5); +++ format %{ "revb_h_h $dst, $src\t#@bytes_reverse_short" %} +++ +++ ins_encode %{ +++ __ revb_h_h(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++// ============================================================================ +++// MemBar Instruction +++ +++instruct load_fence() %{ +++ match(LoadFence); +++ ins_cost(ALU_COST); +++ +++ format %{ "#@load_fence" %} +++ +++ ins_encode %{ +++ __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); +++ %} +++ ins_pipe(pipe_serial); +++%} +++ +++instruct membar_acquire() %{ +++ match(MemBarAcquire); +++ ins_cost(ALU_COST); +++ +++ format %{ "#@membar_acquire\n\t" +++ "fence ir iorw" %} +++ +++ ins_encode %{ +++ __ block_comment("membar_acquire"); +++ __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct 
membar_acquire_lock() %{ +++ match(MemBarAcquireLock); +++ ins_cost(0); +++ +++ format %{ "#@membar_acquire_lock (elided)" %} +++ +++ ins_encode %{ +++ __ block_comment("membar_acquire_lock (elided)"); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct store_fence() %{ +++ match(StoreFence); +++ ins_cost(ALU_COST); +++ +++ format %{ "#@store_fence" %} +++ +++ ins_encode %{ +++ __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); +++ %} +++ ins_pipe(pipe_serial); +++%} +++ +++instruct membar_release() %{ +++ match(MemBarRelease); +++ ins_cost(ALU_COST); +++ +++ format %{ "#@membar_release\n\t" +++ "fence iorw ow" %} +++ +++ ins_encode %{ +++ __ block_comment("membar_release"); +++ __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); +++ %} +++ ins_pipe(pipe_serial); +++%} +++ +++instruct membar_storestore() %{ +++ match(MemBarStoreStore); +++ match(StoreStoreFence); +++ ins_cost(ALU_COST); +++ +++ format %{ "MEMBAR-store-store\t#@membar_storestore" %} +++ +++ ins_encode %{ +++ __ membar(MacroAssembler::StoreStore); +++ %} +++ ins_pipe(pipe_serial); +++%} +++ +++instruct membar_release_lock() %{ +++ match(MemBarReleaseLock); +++ ins_cost(0); +++ +++ format %{ "#@membar_release_lock (elided)" %} +++ +++ ins_encode %{ +++ __ block_comment("membar_release_lock (elided)"); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++instruct membar_volatile() %{ +++ match(MemBarVolatile); +++ ins_cost(ALU_COST); +++ +++ format %{ "#@membar_volatile\n\t" +++ "fence iorw iorw"%} +++ +++ ins_encode %{ +++ __ block_comment("membar_volatile"); +++ __ membar(MacroAssembler::StoreLoad); +++ %} +++ +++ ins_pipe(pipe_serial); +++%} +++ +++// ============================================================================ +++// Cast Instructions (Java-level type cast) +++ +++instruct castX2P(iRegPNoSp dst, iRegL src) %{ +++ match(Set dst (CastX2P src)); +++ +++ ins_cost(ALU_COST); +++ format %{ "mv $dst, $src\t# long -> ptr, #@castX2P" %} +++ +++ ins_encode %{ +++ if ($dst$$reg != $src$$reg) { +++ __ mv(as_Register($dst$$reg), as_Register($src$$reg)); +++ } +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++instruct castP2X(iRegLNoSp dst, iRegP src) %{ +++ match(Set dst (CastP2X src)); +++ +++ ins_cost(ALU_COST); +++ format %{ "mv $dst, $src\t# ptr -> long, #@castP2X" %} +++ +++ ins_encode %{ +++ if ($dst$$reg != $src$$reg) { +++ __ mv(as_Register($dst$$reg), as_Register($src$$reg)); +++ } +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++instruct castPP(iRegPNoSp dst) +++%{ +++ match(Set dst (CastPP dst)); +++ ins_cost(0); +++ +++ size(0); +++ format %{ "# castPP of $dst, #@castPP" %} +++ ins_encode(/* empty encoding */); +++ ins_pipe(pipe_class_empty); +++%} +++ +++instruct castLL(iRegL dst) +++%{ +++ match(Set dst (CastLL dst)); +++ +++ size(0); +++ format %{ "# castLL of $dst, #@castLL" %} +++ ins_encode(/* empty encoding */); +++ ins_cost(0); +++ ins_pipe(pipe_class_empty); +++%} +++ +++instruct castII(iRegI dst) +++%{ +++ match(Set dst (CastII dst)); +++ +++ size(0); +++ format %{ "# castII of $dst, #@castII" %} +++ ins_encode(/* empty encoding */); +++ ins_cost(0); +++ ins_pipe(pipe_class_empty); +++%} +++ +++instruct checkCastPP(iRegPNoSp dst) +++%{ +++ match(Set dst (CheckCastPP dst)); +++ +++ size(0); +++ ins_cost(0); +++ format %{ "# checkcastPP of $dst, #@checkCastPP" %} +++ ins_encode(/* empty encoding */); +++ ins_pipe(pipe_class_empty); +++%} +++ +++instruct castFF(fRegF dst) +++%{ +++ match(Set dst (CastFF dst)); +++ +++ size(0); +++ format %{ "# castFF of $dst" %} +++ 
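+++ // Cast nodes such as this one (and castDD/castVV below) only pin a
+++ // sharper type for the optimizer; with size(0) and an empty encoding
+++ // they emit no instructions at all.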
ins_encode(/* empty encoding */); +++ ins_cost(0); +++ ins_pipe(pipe_class_empty); +++%} +++ +++instruct castDD(fRegD dst) +++%{ +++ match(Set dst (CastDD dst)); +++ +++ size(0); +++ format %{ "# castDD of $dst" %} +++ ins_encode(/* empty encoding */); +++ ins_cost(0); +++ ins_pipe(pipe_class_empty); +++%} +++ +++instruct castVV(vReg dst) +++%{ +++ match(Set dst (CastVV dst)); +++ +++ size(0); +++ format %{ "# castVV of $dst" %} +++ ins_encode(/* empty encoding */); +++ ins_cost(0); +++ ins_pipe(pipe_class_empty); +++%} +++ +++// ============================================================================ +++// Convert Instructions +++ +++// int to bool +++instruct convI2Bool(iRegINoSp dst, iRegI src) +++%{ +++ match(Set dst (Conv2B src)); +++ +++ ins_cost(ALU_COST); +++ format %{ "snez $dst, $src\t#@convI2Bool" %} +++ +++ ins_encode %{ +++ __ snez(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// pointer to bool +++instruct convP2Bool(iRegINoSp dst, iRegP src) +++%{ +++ match(Set dst (Conv2B src)); +++ +++ ins_cost(ALU_COST); +++ format %{ "snez $dst, $src\t#@convP2Bool" %} +++ +++ ins_encode %{ +++ __ snez(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// int <-> long +++ +++instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src) +++%{ +++ match(Set dst (ConvI2L src)); +++ +++ ins_cost(ALU_COST); +++ format %{ "addw $dst, $src, zr\t#@convI2L_reg_reg" %} +++ ins_encode %{ +++ __ sign_extend(as_Register($dst$$reg), as_Register($src$$reg), 32); +++ %} +++ ins_pipe(ialu_reg); +++%} +++ +++instruct convL2I_reg(iRegINoSp dst, iRegL src) %{ +++ match(Set dst (ConvL2I src)); +++ +++ ins_cost(ALU_COST); +++ format %{ "addw $dst, $src, zr\t#@convL2I_reg" %} +++ +++ ins_encode %{ +++ __ sign_extend(as_Register($dst$$reg), as_Register($src$$reg), 32); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// int to unsigned long (Zero-extend) +++instruct convI2UL_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask) +++%{ +++ match(Set dst (AndL (ConvI2L src) mask)); +++ +++ ins_cost(ALU_COST * 2); +++ format %{ "zero_extend $dst, $src, 32\t# i2ul, #@convI2UL_reg_reg" %} +++ +++ ins_encode %{ +++ __ zero_extend(as_Register($dst$$reg), as_Register($src$$reg), 32); +++ %} +++ +++ ins_pipe(ialu_reg_shift); +++%} +++ +++// float <-> double +++ +++instruct convF2D_reg(fRegD dst, fRegF src) %{ +++ match(Set dst (ConvF2D src)); +++ +++ ins_cost(XFER_COST); +++ format %{ "fcvt.d.s $dst, $src\t#@convF2D_reg" %} +++ +++ ins_encode %{ +++ __ fcvt_d_s(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg)); +++ %} +++ +++ ins_pipe(fp_f2d); +++%} +++ +++instruct convD2F_reg(fRegF dst, fRegD src) %{ +++ match(Set dst (ConvD2F src)); +++ +++ ins_cost(XFER_COST); +++ format %{ "fcvt.s.d $dst, $src\t#@convD2F_reg" %} +++ +++ ins_encode %{ +++ __ fcvt_s_d(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg)); +++ %} +++ +++ ins_pipe(fp_d2f); +++%} +++ +++// float <-> int +++ +++instruct convF2I_reg_reg(iRegINoSp dst, fRegF src) %{ +++ match(Set dst (ConvF2I src)); +++ +++ ins_cost(XFER_COST); +++ format %{ "fcvt.w.s $dst, $src\t#@convF2I_reg_reg" %} +++ +++ ins_encode %{ +++ __ fcvt_w_s_safe($dst$$Register, $src$$FloatRegister); +++ %} +++ +++ ins_pipe(fp_f2i); +++%} +++ +++instruct convI2F_reg_reg(fRegF dst, iRegIorL2I src) %{ +++ match(Set dst (ConvI2F src)); +++ +++ ins_cost(XFER_COST); +++ format %{ "fcvt.s.w $dst, $src\t#@convI2F_reg_reg" %} +++ +++ ins_encode %{ +++ __ fcvt_s_w(as_FloatRegister($dst$$reg), 
as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(fp_i2f); +++%} +++ +++// float <-> long +++ +++instruct convF2L_reg_reg(iRegLNoSp dst, fRegF src) %{ +++ match(Set dst (ConvF2L src)); +++ +++ ins_cost(XFER_COST); +++ format %{ "fcvt.l.s $dst, $src\t#@convF2L_reg_reg" %} +++ +++ ins_encode %{ +++ __ fcvt_l_s_safe($dst$$Register, $src$$FloatRegister); +++ %} +++ +++ ins_pipe(fp_f2l); +++%} +++ +++instruct convL2F_reg_reg(fRegF dst, iRegL src) %{ +++ match(Set dst (ConvL2F src)); +++ +++ ins_cost(XFER_COST); +++ format %{ "fcvt.s.l $dst, $src\t#@convL2F_reg_reg" %} +++ +++ ins_encode %{ +++ __ fcvt_s_l(as_FloatRegister($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(fp_l2f); +++%} +++ +++// double <-> int +++ +++instruct convD2I_reg_reg(iRegINoSp dst, fRegD src) %{ +++ match(Set dst (ConvD2I src)); +++ +++ ins_cost(XFER_COST); +++ format %{ "fcvt.w.d $dst, $src\t#@convD2I_reg_reg" %} +++ +++ ins_encode %{ +++ __ fcvt_w_d_safe($dst$$Register, $src$$FloatRegister); +++ %} +++ +++ ins_pipe(fp_d2i); +++%} +++ +++instruct convI2D_reg_reg(fRegD dst, iRegIorL2I src) %{ +++ match(Set dst (ConvI2D src)); +++ +++ ins_cost(XFER_COST); +++ format %{ "fcvt.d.w $dst, $src\t#@convI2D_reg_reg" %} +++ +++ ins_encode %{ +++ __ fcvt_d_w(as_FloatRegister($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(fp_i2d); +++%} +++ +++// double <-> long +++ +++instruct convD2L_reg_reg(iRegLNoSp dst, fRegD src) %{ +++ match(Set dst (ConvD2L src)); +++ +++ ins_cost(XFER_COST); +++ format %{ "fcvt.l.d $dst, $src\t#@convD2L_reg_reg" %} +++ +++ ins_encode %{ +++ __ fcvt_l_d_safe($dst$$Register, $src$$FloatRegister); +++ %} +++ +++ ins_pipe(fp_d2l); +++%} +++ +++instruct convL2D_reg_reg(fRegD dst, iRegL src) %{ +++ match(Set dst (ConvL2D src)); +++ +++ ins_cost(XFER_COST); +++ format %{ "fcvt.d.l $dst, $src\t#@convL2D_reg_reg" %} +++ +++ ins_encode %{ +++ __ fcvt_d_l(as_FloatRegister($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(fp_l2d); +++%} +++ +++// Convert oop into int for vectors alignment masking +++instruct convP2I(iRegINoSp dst, iRegP src) %{ +++ match(Set dst (ConvL2I (CastP2X src))); +++ +++ ins_cost(ALU_COST * 2); +++ format %{ "zero_extend $dst, $src, 32\t# ptr -> int, #@convP2I" %} +++ +++ ins_encode %{ +++ __ zero_extend($dst$$Register, $src$$Register, 32); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// Convert compressed oop into int for vectors alignment masking +++// in case of 32bit oops (heap < 4Gb). 
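+++// As a minimal C++ sketch of narrow-oop decoding (heap_base and shift
+++// stand for the CompressedOops parameters; the zero values are the
+++// assumed unscaled, zero-based mode):
+++//
+++//   static inline uintptr_t decode(uint32_t narrow,
+++//                                  uintptr_t heap_base,  // 0 if heap < 4Gb
+++//                                  unsigned shift) {     // 0 when unscaled
+++//     return heap_base + (static_cast<uintptr_t>(narrow) << shift);
+++//   }
+++//
+++// With a zero base and zero shift, decoding is the identity on the low
+++// 32 bits, so the rule below can use a plain register move.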
+++instruct convN2I(iRegINoSp dst, iRegN src) +++%{ +++ predicate(CompressedOops::shift() == 0); +++ match(Set dst (ConvL2I (CastP2X (DecodeN src)))); +++ +++ ins_cost(ALU_COST); +++ format %{ "mv $dst, $src\t# compressed ptr -> int, #@convN2I" %} +++ +++ ins_encode %{ +++ __ mv($dst$$Register, $src$$Register); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// Convert oop pointer into compressed form +++instruct encodeHeapOop(iRegNNoSp dst, iRegP src) %{ +++ match(Set dst (EncodeP src)); +++ ins_cost(ALU_COST); +++ format %{ "encode_heap_oop $dst, $src\t#@encodeHeapOop" %} +++ ins_encode %{ +++ Register s = $src$$Register; +++ Register d = $dst$$Register; +++ __ encode_heap_oop(d, s); +++ %} +++ ins_pipe(pipe_class_default); +++%} +++ +++instruct decodeHeapOop(iRegPNoSp dst, iRegN src) %{ +++ predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull && +++ n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant); +++ match(Set dst (DecodeN src)); +++ +++ ins_cost(0); +++ format %{ "decode_heap_oop $dst, $src\t#@decodeHeapOop" %} +++ ins_encode %{ +++ Register s = $src$$Register; +++ Register d = $dst$$Register; +++ __ decode_heap_oop(d, s); +++ %} +++ ins_pipe(pipe_class_default); +++%} +++ +++instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src) %{ +++ predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull || +++ n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant); +++ match(Set dst (DecodeN src)); +++ +++ ins_cost(0); +++ format %{ "decode_heap_oop_not_null $dst, $src\t#@decodeHeapOop_not_null" %} +++ ins_encode %{ +++ Register s = $src$$Register; +++ Register d = $dst$$Register; +++ __ decode_heap_oop_not_null(d, s); +++ %} +++ ins_pipe(pipe_class_default); +++%} +++ +++// Convert klass pointer into compressed form. +++instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{ +++ match(Set dst (EncodePKlass src)); +++ +++ ins_cost(ALU_COST); +++ format %{ "encode_klass_not_null $dst, $src\t#@encodeKlass_not_null" %} +++ +++ ins_encode %{ +++ Register src_reg = as_Register($src$$reg); +++ Register dst_reg = as_Register($dst$$reg); +++ __ encode_klass_not_null(dst_reg, src_reg, t0); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src, iRegPNoSp tmp) %{ +++ match(Set dst (DecodeNKlass src)); +++ +++ effect(TEMP tmp); +++ +++ ins_cost(ALU_COST); +++ format %{ "decode_klass_not_null $dst, $src\t#@decodeKlass_not_null" %} +++ +++ ins_encode %{ +++ Register src_reg = as_Register($src$$reg); +++ Register dst_reg = as_Register($dst$$reg); +++ Register tmp_reg = as_Register($tmp$$reg); +++ __ decode_klass_not_null(dst_reg, src_reg, tmp_reg); +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++// stack <-> reg and reg <-> reg shuffles with no conversion +++ +++instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{ +++ +++ match(Set dst (MoveF2I src)); +++ +++ effect(DEF dst, USE src); +++ +++ ins_cost(LOAD_COST); +++ +++ format %{ "lw $dst, $src\t#@MoveF2I_stack_reg" %} +++ +++ ins_encode %{ +++ __ lw(as_Register($dst$$reg), Address(sp, $src$$disp)); +++ %} +++ +++ ins_pipe(iload_reg_reg); +++ +++%} +++ +++instruct MoveI2F_stack_reg(fRegF dst, stackSlotI src) %{ +++ +++ match(Set dst (MoveI2F src)); +++ +++ effect(DEF dst, USE src); +++ +++ ins_cost(LOAD_COST); +++ +++ format %{ "flw $dst, $src\t#@MoveI2F_stack_reg" %} +++ +++ ins_encode %{ +++ __ flw(as_FloatRegister($dst$$reg), Address(sp, $src$$disp)); +++ %} +++ +++ ins_pipe(fp_load_mem_s); +++ +++%} +++ +++instruct MoveD2L_stack_reg(iRegLNoSp dst, 
stackSlotD src) %{
+++
+++ match(Set dst (MoveD2L src));
+++
+++ effect(DEF dst, USE src);
+++
+++ ins_cost(LOAD_COST);
+++
+++ format %{ "ld $dst, $src\t#@MoveD2L_stack_reg" %}
+++
+++ ins_encode %{
+++ __ ld(as_Register($dst$$reg), Address(sp, $src$$disp));
+++ %}
+++
+++ ins_pipe(iload_reg_reg);
+++
+++%}
+++
+++instruct MoveL2D_stack_reg(fRegD dst, stackSlotL src) %{
+++
+++ match(Set dst (MoveL2D src));
+++
+++ effect(DEF dst, USE src);
+++
+++ ins_cost(LOAD_COST);
+++
+++ format %{ "fld $dst, $src\t#@MoveL2D_stack_reg" %}
+++
+++ ins_encode %{
+++ __ fld(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
+++ %}
+++
+++ ins_pipe(fp_load_mem_d);
+++
+++%}
+++
+++instruct MoveF2I_reg_stack(stackSlotI dst, fRegF src) %{
+++
+++ match(Set dst (MoveF2I src));
+++
+++ effect(DEF dst, USE src);
+++
+++ ins_cost(STORE_COST);
+++
+++ format %{ "fsw $src, $dst\t#@MoveF2I_reg_stack" %}
+++
+++ ins_encode %{
+++ __ fsw(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
+++ %}
+++
+++ ins_pipe(fp_store_reg_s);
+++
+++%}
+++
+++instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
+++
+++ match(Set dst (MoveI2F src));
+++
+++ effect(DEF dst, USE src);
+++
+++ ins_cost(STORE_COST);
+++
+++ format %{ "sw $src, $dst\t#@MoveI2F_reg_stack" %}
+++
+++ ins_encode %{
+++ __ sw(as_Register($src$$reg), Address(sp, $dst$$disp));
+++ %}
+++
+++ ins_pipe(istore_reg_reg);
+++
+++%}
+++
+++instruct MoveD2L_reg_stack(stackSlotL dst, fRegD src) %{
+++
+++ match(Set dst (MoveD2L src));
+++
+++ effect(DEF dst, USE src);
+++
+++ ins_cost(STORE_COST);
+++
+++ format %{ "fsd $src, $dst\t#@MoveD2L_reg_stack" %}
+++
+++ ins_encode %{
+++ __ fsd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
+++ %}
+++
+++ ins_pipe(fp_store_reg_d);
+++
+++%}
+++
+++instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
+++
+++ match(Set dst (MoveL2D src));
+++
+++ effect(DEF dst, USE src);
+++
+++ ins_cost(STORE_COST);
+++
+++ format %{ "sd $src, $dst\t#@MoveL2D_reg_stack" %}
+++
+++ ins_encode %{
+++ __ sd(as_Register($src$$reg), Address(sp, $dst$$disp));
+++ %}
+++
+++ ins_pipe(istore_reg_reg);
+++
+++%}
+++
+++instruct MoveF2I_reg_reg(iRegINoSp dst, fRegF src) %{
+++
+++ match(Set dst (MoveF2I src));
+++
+++ effect(DEF dst, USE src);
+++
+++ ins_cost(XFER_COST);
+++
+++ format %{ "fmv.x.w $dst, $src\t#@MoveF2I_reg_reg" %}
+++
+++ ins_encode %{
+++ __ fmv_x_w(as_Register($dst$$reg), as_FloatRegister($src$$reg));
+++ %}
+++
+++ ins_pipe(fp_f2i);
+++
+++%}
+++
+++instruct MoveI2F_reg_reg(fRegF dst, iRegI src) %{
+++
+++ match(Set dst (MoveI2F src));
+++
+++ effect(DEF dst, USE src);
+++
+++ ins_cost(XFER_COST);
+++
+++ format %{ "fmv.w.x $dst, $src\t#@MoveI2F_reg_reg" %}
+++
+++ ins_encode %{
+++ __ fmv_w_x(as_FloatRegister($dst$$reg), as_Register($src$$reg));
+++ %}
+++
+++ ins_pipe(fp_i2f);
+++
+++%}
+++
+++instruct MoveD2L_reg_reg(iRegLNoSp dst, fRegD src) %{
+++
+++ match(Set dst (MoveD2L src));
+++
+++ effect(DEF dst, USE src);
+++
+++ ins_cost(XFER_COST);
+++
+++ format %{ "fmv.x.d $dst, $src\t#@MoveD2L_reg_reg" %}
+++
+++ ins_encode %{
+++ __ fmv_x_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
+++ %}
+++
+++ ins_pipe(fp_d2l);
+++
+++%}
+++
+++instruct MoveL2D_reg_reg(fRegD dst, iRegL src) %{
+++
+++ match(Set dst (MoveL2D src));
+++
+++ effect(DEF dst, USE src);
+++
+++ ins_cost(XFER_COST);
+++
+++ format %{ "fmv.d.x $dst, $src\t#@MoveL2D_reg_reg" %}
+++
+++ ins_encode %{
+++ __ fmv_d_x(as_FloatRegister($dst$$reg), as_Register($src$$reg));
+++ %}
+++
+++ ins_pipe(fp_l2d);
+++
+++%}
+++
+++// ============================================================================
+++// Compare Instructions which set the result of a comparison in the
+++// destination register.
+++
+++instruct cmpF3_reg_reg(iRegINoSp dst, fRegF op1, fRegF op2)
+++%{
+++ match(Set dst (CmpF3 op1 op2));
+++
+++ ins_cost(XFER_COST * 2 + BRANCH_COST + ALU_COST);
+++ format %{ "flt.s $dst, $op2, $op1\t#@cmpF3_reg_reg\n\t"
+++ "bgtz $dst, done\n\t"
+++ "feq.s $dst, $op1, $op2\n\t"
+++ "addi $dst, $dst, -1\t#@cmpF3_reg_reg"
+++ %}
+++
+++ ins_encode %{
+++ // we want -1 for unordered or less than, 0 for equal and 1 for greater than.
+++ __ float_compare(as_Register($dst$$reg), as_FloatRegister($op1$$reg),
+++ as_FloatRegister($op2$$reg), -1 /*unordered_result < 0*/);
+++ %}
+++
+++ ins_pipe(pipe_class_default);
+++%}
+++
+++instruct cmpD3_reg_reg(iRegINoSp dst, fRegD op1, fRegD op2)
+++%{
+++ match(Set dst (CmpD3 op1 op2));
+++
+++ ins_cost(XFER_COST * 2 + BRANCH_COST + ALU_COST);
+++ format %{ "flt.d $dst, $op2, $op1\t#@cmpD3_reg_reg\n\t"
+++ "bgtz $dst, done\n\t"
+++ "feq.d $dst, $op1, $op2\n\t"
+++ "addi $dst, $dst, -1\t#@cmpD3_reg_reg"
+++ %}
+++
+++ ins_encode %{
+++ // we want -1 for unordered or less than, 0 for equal and 1 for greater than.
+++ __ double_compare(as_Register($dst$$reg), as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), -1 /*unordered_result < 0*/);
+++ %}
+++
+++ ins_pipe(pipe_class_default);
+++%}
+++
+++instruct cmpL3_reg_reg(iRegINoSp dst, iRegL op1, iRegL op2)
+++%{
+++ match(Set dst (CmpL3 op1 op2));
+++
+++ ins_cost(ALU_COST * 3 + BRANCH_COST);
+++ format %{ "slt $dst, $op2, $op1\t#@cmpL3_reg_reg\n\t"
+++ "bnez $dst, done\n\t"
+++ "slt $dst, $op1, $op2\n\t"
+++ "neg $dst, $dst\t#@cmpL3_reg_reg"
+++ %}
+++ ins_encode %{
+++ __ cmp_l2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
+++ __ mv(as_Register($dst$$reg), t0);
+++ %}
+++
+++ ins_pipe(pipe_class_default);
+++%}
+++
+++instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegI p, iRegI q)
+++%{
+++ match(Set dst (CmpLTMask p q));
+++
+++ ins_cost(2 * ALU_COST);
+++
+++ format %{ "slt $dst, $p, $q\t#@cmpLTMask_reg_reg\n\t"
+++ "subw $dst, zr, $dst\t#@cmpLTMask_reg_reg"
+++ %}
+++
+++ ins_encode %{
+++ __ slt(as_Register($dst$$reg), as_Register($p$$reg), as_Register($q$$reg));
+++ __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
+++ %}
+++
+++ ins_pipe(ialu_reg_reg);
+++%}
+++
+++instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I op, immI0 zero)
+++%{
+++ match(Set dst (CmpLTMask op zero));
+++
+++ ins_cost(ALU_COST);
+++
+++ format %{ "sraiw $dst, $op, 31\t#@cmpLTMask_reg_zero" %}
+++
+++ ins_encode %{
+++ __ sraiw(as_Register($dst$$reg), as_Register($op$$reg), 31);
+++ %}
+++
+++ ins_pipe(ialu_reg_shift);
+++%}
+++
+++
+++// ============================================================================
+++// Max and Min
+++
+++instruct minI_reg_reg(iRegINoSp dst, iRegI src)
+++%{
+++ match(Set dst (MinI dst src));
+++
+++ ins_cost(BRANCH_COST + ALU_COST);
+++ format %{
+++ "ble $dst, $src, skip\t#@minI_reg_reg\n\t"
+++ "mv $dst, $src\n\t"
+++ "skip:"
+++ %}
+++
+++ ins_encode %{
+++ Label Lskip;
+++ __ ble(as_Register($dst$$reg), as_Register($src$$reg), Lskip);
+++ __ mv(as_Register($dst$$reg), as_Register($src$$reg));
+++ __ bind(Lskip);
+++ %}
+++
+++ ins_pipe(pipe_class_compare);
+++%}
+++
+++instruct maxI_reg_reg(iRegINoSp dst, iRegI src)
+++%{
+++ match(Set dst (MaxI dst src));
+++
+++ ins_cost(BRANCH_COST + ALU_COST);
+++ format %{
+++ "bge $dst, $src, skip\t#@maxI_reg_reg\n\t"
+++ "mv $dst, $src\n\t"
+++ "skip:"
+++ %}

+++ +++ ins_encode %{ +++ Label Lskip; +++ __ bge(as_Register($dst$$reg), as_Register($src$$reg), Lskip); +++ __ mv(as_Register($dst$$reg), as_Register($src$$reg)); +++ __ bind(Lskip); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++%} +++ +++// special case for comparing with zero +++// n.b. this is selected in preference to the rule above because it +++// avoids loading constant 0 into a source register +++ +++instruct minI_reg_zero(iRegINoSp dst, immI0 zero) +++%{ +++ match(Set dst (MinI dst zero)); +++ match(Set dst (MinI zero dst)); +++ +++ ins_cost(BRANCH_COST + ALU_COST); +++ format %{ +++ "blez $dst, skip\t#@minI_reg_zero\n\t" +++ "mv $dst, zr\n\t" +++ "skip:" +++ %} +++ +++ ins_encode %{ +++ Label Lskip; +++ __ blez(as_Register($dst$$reg), Lskip); +++ __ mv(as_Register($dst$$reg), zr); +++ __ bind(Lskip); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++%} +++ +++instruct maxI_reg_zero(iRegINoSp dst, immI0 zero) +++%{ +++ match(Set dst (MaxI dst zero)); +++ match(Set dst (MaxI zero dst)); +++ +++ ins_cost(BRANCH_COST + ALU_COST); +++ format %{ +++ "bgez $dst, skip\t#@maxI_reg_zero\n\t" +++ "mv $dst, zr\n\t" +++ "skip:" +++ %} +++ +++ ins_encode %{ +++ Label Lskip; +++ __ bgez(as_Register($dst$$reg), Lskip); +++ __ mv(as_Register($dst$$reg), zr); +++ __ bind(Lskip); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++%} +++ +++instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2) +++%{ +++ match(Set dst (MinI src1 src2)); +++ +++ effect(DEF dst, USE src1, USE src2); +++ +++ ins_cost(BRANCH_COST + ALU_COST * 2); +++ format %{ +++ "ble $src1, $src2, Lsrc1.\t#@minI_rReg\n\t" +++ "mv $dst, $src2\n\t" +++ "j Ldone\n\t" +++ "bind Lsrc1\n\t" +++ "mv $dst, $src1\n\t" +++ "bind\t#@minI_rReg" +++ %} +++ +++ ins_encode %{ +++ Label Lsrc1, Ldone; +++ __ ble(as_Register($src1$$reg), as_Register($src2$$reg), Lsrc1); +++ __ mv(as_Register($dst$$reg), as_Register($src2$$reg)); +++ __ j(Ldone); +++ __ bind(Lsrc1); +++ __ mv(as_Register($dst$$reg), as_Register($src1$$reg)); +++ __ bind(Ldone); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++%} +++ +++instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2) +++%{ +++ match(Set dst (MaxI src1 src2)); +++ +++ effect(DEF dst, USE src1, USE src2); +++ +++ ins_cost(BRANCH_COST + ALU_COST * 2); +++ format %{ +++ "bge $src1, $src2, Lsrc1\t#@maxI_rReg\n\t" +++ "mv $dst, $src2\n\t" +++ "j Ldone\n\t" +++ "bind Lsrc1\n\t" +++ "mv $dst, $src1\n\t" +++ "bind\t#@maxI_rReg" +++ %} +++ +++ ins_encode %{ +++ Label Lsrc1, Ldone; +++ __ bge(as_Register($src1$$reg), as_Register($src2$$reg), Lsrc1); +++ __ mv(as_Register($dst$$reg), as_Register($src2$$reg)); +++ __ j(Ldone); +++ __ bind(Lsrc1); +++ __ mv(as_Register($dst$$reg), as_Register($src1$$reg)); +++ __ bind(Ldone); +++ +++ %} +++ +++ ins_pipe(pipe_class_compare); +++%} +++ +++// ============================================================================ +++// Branch Instructions +++// Direct Branch. +++instruct branch(label lbl) +++%{ +++ match(Goto); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ format %{ "j $lbl\t#@branch" %} +++ +++ ins_encode(riscv_enc_j(lbl)); +++ +++ ins_pipe(pipe_branch); +++%} +++ +++// ============================================================================ +++// Compare and Branch Instructions +++ +++// Patterns for short (< 12KiB) variants +++ +++// Compare flags and branch near instructions. 
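+++// These near forms rely on the limited reach of RISC-V B-type branches,
+++// which encode a 13-bit signed byte offset (+/-4KiB); C2 sizes the code
+++// and substitutes the far_* variants whenever a short form cannot reach.
+++// A C++ sketch of that range test (offset in bytes is an assumed
+++// convention, not this port's exact helper):
+++//
+++//   static inline bool in_branch_range(int64_t offset) {
+++//     return offset >= -4096 && offset < 4096;
+++//   }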
+++instruct cmpFlag_branch(cmpOpEqNe cmp, rFlagsReg cr, label lbl) %{ +++ match(If cmp cr); +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ format %{ "b$cmp $cr, zr, $lbl\t#@cmpFlag_branch" %} +++ +++ ins_encode %{ +++ __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($cr$$reg), *($lbl$$label)); +++ %} +++ ins_pipe(pipe_cmpz_branch); +++ ins_short_branch(1); +++%} +++ +++// Compare signed int and branch near instructions +++instruct cmpI_branch(cmpOp cmp, iRegI op1, iRegI op2, label lbl) +++%{ +++ // Same match rule as `far_cmpI_branch'. +++ match(If cmp (CmpI op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, $op2, $lbl\t#@cmpI_branch" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++ ins_short_branch(1); +++%} +++ +++instruct cmpI_loop(cmpOp cmp, iRegI op1, iRegI op2, label lbl) +++%{ +++ // Same match rule as `far_cmpI_loop'. +++ match(CountedLoopEnd cmp (CmpI op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, $op2, $lbl\t#@cmpI_loop" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++ ins_short_branch(1); +++%} +++ +++// Compare unsigned int and branch near instructions +++instruct cmpU_branch(cmpOpU cmp, iRegI op1, iRegI op2, label lbl) +++%{ +++ // Same match rule as `far_cmpU_branch'. +++ match(If cmp (CmpU op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, $op2, $lbl\t#@cmpU_branch" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg), +++ as_Register($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++ ins_short_branch(1); +++%} +++ +++instruct cmpU_loop(cmpOpU cmp, iRegI op1, iRegI op2, label lbl) +++%{ +++ // Same match rule as `far_cmpU_loop'. +++ match(CountedLoopEnd cmp (CmpU op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, $op2, $lbl\t#@cmpU_loop" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg), +++ as_Register($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++ ins_short_branch(1); +++%} +++ +++// Compare signed long and branch near instructions +++instruct cmpL_branch(cmpOp cmp, iRegL op1, iRegL op2, label lbl) +++%{ +++ // Same match rule as `far_cmpL_branch'. +++ match(If cmp (CmpL op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, $op2, $lbl\t#@cmpL_branch" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++ ins_short_branch(1); +++%} +++ +++instruct cmpL_loop(cmpOp cmp, iRegL op1, iRegL op2, label lbl) +++%{ +++ // Same match rule as `far_cmpL_loop'. 
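+++ // CountedLoopEnd is a distinct ideal node from If, so the loop
+++ // back-branch needs its own rule to remain eligible for the short
+++ // encoding.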
+++ match(CountedLoopEnd cmp (CmpL op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, $op2, $lbl\t#@cmpL_loop" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++ ins_short_branch(1); +++%} +++ +++// Compare unsigned long and branch near instructions +++instruct cmpUL_branch(cmpOpU cmp, iRegL op1, iRegL op2, label lbl) +++%{ +++ // Same match rule as `far_cmpUL_branch'. +++ match(If cmp (CmpUL op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ format %{ "b$cmp $op1, $op2, $lbl\t#@cmpUL_branch" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg), +++ as_Register($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++ ins_short_branch(1); +++%} +++ +++instruct cmpUL_loop(cmpOpU cmp, iRegL op1, iRegL op2, label lbl) +++%{ +++ // Same match rule as `far_cmpUL_loop'. +++ match(CountedLoopEnd cmp (CmpUL op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ format %{ "b$cmp $op1, $op2, $lbl\t#@cmpUL_loop" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg), +++ as_Register($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++ ins_short_branch(1); +++%} +++ +++// Compare pointer and branch near instructions +++instruct cmpP_branch(cmpOpU cmp, iRegP op1, iRegP op2, label lbl) +++%{ +++ // Same match rule as `far_cmpP_branch'. +++ match(If cmp (CmpP op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, $op2, $lbl\t#@cmpP_branch" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg), +++ as_Register($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++ ins_short_branch(1); +++%} +++ +++instruct cmpP_loop(cmpOpU cmp, iRegP op1, iRegP op2, label lbl) +++%{ +++ // Same match rule as `far_cmpP_loop'. +++ match(CountedLoopEnd cmp (CmpP op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, $op2, $lbl\t#@cmpP_loop" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg), +++ as_Register($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++ ins_short_branch(1); +++%} +++ +++// Compare narrow pointer and branch near instructions +++instruct cmpN_branch(cmpOpU cmp, iRegN op1, iRegN op2, label lbl) +++%{ +++ // Same match rule as `far_cmpN_branch'. +++ match(If cmp (CmpN op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, $op2, $lbl\t#@cmpN_branch" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg), +++ as_Register($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++ ins_short_branch(1); +++%} +++ +++instruct cmpN_loop(cmpOpU cmp, iRegN op1, iRegN op2, label lbl) +++%{ +++ // Same match rule as `far_cmpN_loop'. 
+++ match(CountedLoopEnd cmp (CmpN op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, $op2, $lbl\t#@cmpN_loop" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg), +++ as_Register($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++ ins_short_branch(1); +++%} +++ +++// Compare float and branch near instructions +++instruct cmpF_branch(cmpOp cmp, fRegF op1, fRegF op2, label lbl) +++%{ +++ // Same match rule as `far_cmpF_branch'. +++ match(If cmp (CmpF op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(XFER_COST + BRANCH_COST); +++ format %{ "float_b$cmp $op1, $op2, $lbl \t#@cmpF_branch"%} +++ +++ ins_encode %{ +++ __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++ ins_short_branch(1); +++%} +++ +++instruct cmpF_loop(cmpOp cmp, fRegF op1, fRegF op2, label lbl) +++%{ +++ // Same match rule as `far_cmpF_loop'. +++ match(CountedLoopEnd cmp (CmpF op1 op2)); +++ effect(USE lbl); +++ +++ ins_cost(XFER_COST + BRANCH_COST); +++ format %{ "float_b$cmp $op1, $op2, $lbl\t#@cmpF_loop"%} +++ +++ ins_encode %{ +++ __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++ ins_short_branch(1); +++%} +++ +++// Compare double and branch near instructions +++instruct cmpD_branch(cmpOp cmp, fRegD op1, fRegD op2, label lbl) +++%{ +++ // Same match rule as `far_cmpD_branch'. +++ match(If cmp (CmpD op1 op2)); +++ effect(USE lbl); +++ +++ ins_cost(XFER_COST + BRANCH_COST); +++ format %{ "double_b$cmp $op1, $op2, $lbl\t#@cmpD_branch"%} +++ +++ ins_encode %{ +++ __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg), +++ as_FloatRegister($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++ ins_short_branch(1); +++%} +++ +++instruct cmpD_loop(cmpOp cmp, fRegD op1, fRegD op2, label lbl) +++%{ +++ // Same match rule as `far_cmpD_loop'. +++ match(CountedLoopEnd cmp (CmpD op1 op2)); +++ effect(USE lbl); +++ +++ ins_cost(XFER_COST + BRANCH_COST); +++ format %{ "double_b$cmp $op1, $op2, $lbl\t#@cmpD_loop"%} +++ +++ ins_encode %{ +++ __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg), +++ as_FloatRegister($op2$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++ ins_short_branch(1); +++%} +++ +++// Compare signed int with zero and branch near instructions +++instruct cmpI_reg_imm0_branch(cmpOp cmp, iRegI op1, immI0 zero, label lbl) +++%{ +++ // Same match rule as `far_cmpI_reg_imm0_branch'. +++ match(If cmp (CmpI op1 zero)); +++ +++ effect(USE op1, USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ format %{ "b$cmp $op1, zr, $lbl\t#@cmpI_reg_imm0_branch" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++ ins_short_branch(1); +++%} +++ +++instruct cmpI_reg_imm0_loop(cmpOp cmp, iRegI op1, immI0 zero, label lbl) +++%{ +++ // Same match rule as `far_cmpI_reg_imm0_loop'. 
+++ match(CountedLoopEnd cmp (CmpI op1 zero)); +++ +++ effect(USE op1, USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, zr, $lbl\t#@cmpI_reg_imm0_loop" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++ ins_short_branch(1); +++%} +++ +++// Compare unsigned int with zero and branch near instructions +++instruct cmpUEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl) +++%{ +++ // Same match rule as `far_cmpUEqNeLeGt_reg_imm0_branch'. +++ match(If cmp (CmpU op1 zero)); +++ +++ effect(USE op1, USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, zr, $lbl\t#@cmpUEqNeLeGt_reg_imm0_branch" %} +++ +++ ins_encode %{ +++ __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++ ins_short_branch(1); +++%} +++ +++instruct cmpUEqNeLeGt_reg_imm0_loop(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl) +++%{ +++ // Same match rule as `far_cmpUEqNeLeGt_reg_imm0_loop'. +++ match(CountedLoopEnd cmp (CmpU op1 zero)); +++ +++ effect(USE op1, USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, zr, $lbl\t#@cmpUEqNeLeGt_reg_imm0_loop" %} +++ +++ +++ ins_encode %{ +++ __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++ ins_short_branch(1); +++%} +++ +++// Compare signed long with zero and branch near instructions +++instruct cmpL_reg_imm0_branch(cmpOp cmp, iRegL op1, immL0 zero, label lbl) +++%{ +++ // Same match rule as `far_cmpL_reg_imm0_branch'. +++ match(If cmp (CmpL op1 zero)); +++ +++ effect(USE op1, USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, zr, $lbl\t#@cmpL_reg_imm0_branch" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++ ins_short_branch(1); +++%} +++ +++instruct cmpL_reg_imm0_loop(cmpOp cmp, iRegL op1, immL0 zero, label lbl) +++%{ +++ // Same match rule as `far_cmpL_reg_imm0_loop'. +++ match(CountedLoopEnd cmp (CmpL op1 zero)); +++ +++ effect(USE op1, USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, zr, $lbl\t#@cmpL_reg_imm0_loop" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++ ins_short_branch(1); +++%} +++ +++// Compare unsigned long with zero and branch near instructions +++instruct cmpULEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl) +++%{ +++ // Same match rule as `far_cmpULEqNeLeGt_reg_imm0_branch'. +++ match(If cmp (CmpUL op1 zero)); +++ +++ effect(USE op1, USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, zr, $lbl\t#@cmpULEqNeLeGt_reg_imm0_branch" %} +++ +++ ins_encode %{ +++ __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++ ins_short_branch(1); +++%} +++ +++instruct cmpULEqNeLeGt_reg_imm0_loop(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl) +++%{ +++ // Same match rule as `far_cmpULEqNeLeGt_reg_imm0_loop'. 
+++ match(CountedLoopEnd cmp (CmpUL op1 zero)); +++ +++ effect(USE op1, USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, zr, $lbl\t#@cmpULEqNeLeGt_reg_imm0_loop" %} +++ +++ ins_encode %{ +++ __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++ ins_short_branch(1); +++%} +++ +++// Compare pointer with zero and branch near instructions +++instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{ +++ // Same match rule as `far_cmpP_reg_imm0_branch'. +++ match(If cmp (CmpP op1 zero)); +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ format %{ "b$cmp $op1, zr, $lbl\t#@cmpP_imm0_branch" %} +++ +++ ins_encode %{ +++ __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++ ins_short_branch(1); +++%} +++ +++instruct cmpP_imm0_loop(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{ +++ // Same match rule as `far_cmpP_reg_imm0_loop'. +++ match(CountedLoopEnd cmp (CmpP op1 zero)); +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ format %{ "b$cmp $op1, zr, $lbl\t#@cmpP_imm0_loop" %} +++ +++ ins_encode %{ +++ __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++ ins_short_branch(1); +++%} +++ +++// Compare narrow pointer with zero and branch near instructions +++instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{ +++ // Same match rule as `far_cmpN_reg_imm0_branch'. +++ match(If cmp (CmpN op1 zero)); +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, zr, $lbl\t#@cmpN_imm0_branch" %} +++ +++ ins_encode %{ +++ __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++ ins_short_branch(1); +++%} +++ +++instruct cmpN_imm0_loop(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{ +++ // Same match rule as `far_cmpN_reg_imm0_loop'. +++ match(CountedLoopEnd cmp (CmpN op1 zero)); +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "b$cmp $op1, zr, $lbl\t#@cmpN_imm0_loop" %} +++ +++ ins_encode %{ +++ __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++ ins_short_branch(1); +++%} +++ +++// Compare narrow pointer with pointer zero and branch near instructions +++instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{ +++ // Same match rule as `far_cmpP_narrowOop_imm0_branch'. +++ match(If cmp (CmpP (DecodeN op1) zero)); +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST); +++ format %{ "b$cmp $op1, zr, $lbl\t#@cmpP_narrowOop_imm0_branch" %} +++ +++ ins_encode %{ +++ __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label)); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++ ins_short_branch(1); +++%} +++ +++instruct cmpP_narrowOop_imm0_loop(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{ +++ // Same match rule as `far_cmpP_narrowOop_imm0_loop'. 
+++ match(CountedLoopEnd cmp (CmpP (DecodeN op1) zero));
+++ effect(USE lbl);
+++
+++ ins_cost(BRANCH_COST);
+++ format %{ "b$cmp $op1, zr, $lbl\t#@cmpP_narrowOop_imm0_loop" %}
+++
+++ ins_encode %{
+++ __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
+++ %}
+++
+++ ins_pipe(pipe_cmpz_branch);
+++ ins_short_branch(1);
+++%}
+++
+++// Patterns for far (20KiB) variants
+++
+++instruct far_cmpFlag_branch(cmpOp cmp, rFlagsReg cr, label lbl) %{
+++ match(If cmp cr);
+++ effect(USE lbl);
+++
+++ ins_cost(BRANCH_COST);
+++ format %{ "far_b$cmp $cr, zr, $lbl\t#@far_cmpFlag_branch"%}
+++
+++ ins_encode %{
+++ __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($cr$$reg), *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_cmpz_branch);
+++%}
+++
+++// Compare signed int and branch far instructions
+++instruct far_cmpI_branch(cmpOp cmp, iRegI op1, iRegI op2, label lbl) %{
+++ match(If cmp (CmpI op1 op2));
+++ effect(USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++
+++ // the format instruction [far_b$cmp] here is used as two instructions
+++ // in the macroassembler: b$not_cmp(op1, op2, done), j($lbl), bind(done)
+++ format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpI_branch" %}
+++
+++ ins_encode %{
+++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_cmp_branch);
+++%}
+++
+++instruct far_cmpI_loop(cmpOp cmp, iRegI op1, iRegI op2, label lbl) %{
+++ match(CountedLoopEnd cmp (CmpI op1 op2));
+++ effect(USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++ format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpI_loop" %}
+++
+++ ins_encode %{
+++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_cmp_branch);
+++%}
+++
+++instruct far_cmpU_branch(cmpOpU cmp, iRegI op1, iRegI op2, label lbl) %{
+++ match(If cmp (CmpU op1 op2));
+++ effect(USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++ format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpU_branch" %}
+++
+++ ins_encode %{
+++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
+++ as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_cmp_branch);
+++%}
+++
+++instruct far_cmpU_loop(cmpOpU cmp, iRegI op1, iRegI op2, label lbl) %{
+++ match(CountedLoopEnd cmp (CmpU op1 op2));
+++ effect(USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++ format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpU_loop" %}
+++
+++ ins_encode %{
+++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
+++ as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_cmp_branch);
+++%}
+++
+++instruct far_cmpL_branch(cmpOp cmp, iRegL op1, iRegL op2, label lbl) %{
+++ match(If cmp (CmpL op1 op2));
+++ effect(USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++ format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpL_branch" %}
+++
+++ ins_encode %{
+++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_cmp_branch);
+++%}
+++
+++instruct far_cmpL_loop(cmpOp cmp, iRegL op1, iRegL op2, label lbl) %{
+++ match(CountedLoopEnd cmp (CmpL op1 op2));
+++ effect(USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++ format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpL_loop" %}
+++
+++ ins_encode %{
+++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), 
as_Register($op2$$reg), *($lbl$$label), /* is_far */ true); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++%} +++ +++instruct far_cmpUL_branch(cmpOpU cmp, iRegL op1, iRegL op2, label lbl) %{ +++ match(If cmp (CmpUL op1 op2)); +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST * 2); +++ format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpUL_branch" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg), +++ as_Register($op2$$reg), *($lbl$$label), /* is_far */ true); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++%} +++ +++instruct far_cmpUL_loop(cmpOpU cmp, iRegL op1, iRegL op2, label lbl) %{ +++ match(CountedLoopEnd cmp (CmpUL op1 op2)); +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST * 2); +++ format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpUL_loop" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg), +++ as_Register($op2$$reg), *($lbl$$label), /* is_far */ true); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++%} +++ +++instruct far_cmpP_branch(cmpOpU cmp, iRegP op1, iRegP op2, label lbl) +++%{ +++ match(If cmp (CmpP op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST * 2); +++ +++ format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpP_branch" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg), +++ as_Register($op2$$reg), *($lbl$$label), /* is_far */ true); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++%} +++ +++instruct far_cmpP_loop(cmpOpU cmp, iRegP op1, iRegP op2, label lbl) +++%{ +++ match(CountedLoopEnd cmp (CmpP op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST * 2); +++ +++ format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpP_loop" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg), +++ as_Register($op2$$reg), *($lbl$$label), /* is_far */ true); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++%} +++ +++instruct far_cmpN_branch(cmpOpU cmp, iRegN op1, iRegN op2, label lbl) +++%{ +++ match(If cmp (CmpN op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST * 2); +++ +++ format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpN_branch" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg), +++ as_Register($op2$$reg), *($lbl$$label), /* is_far */ true); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++%} +++ +++instruct far_cmpN_loop(cmpOpU cmp, iRegN op1, iRegN op2, label lbl) +++%{ +++ match(CountedLoopEnd cmp (CmpN op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST * 2); +++ +++ format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpN_loop" %} +++ +++ ins_encode %{ +++ __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg), +++ as_Register($op2$$reg), *($lbl$$label), /* is_far */ true); +++ %} +++ +++ ins_pipe(pipe_cmp_branch); +++%} +++ +++// Float compare and branch instructions +++instruct far_cmpF_branch(cmpOp cmp, fRegF op1, fRegF op2, label lbl) +++%{ +++ match(If cmp (CmpF op1 op2)); +++ +++ effect(USE lbl); +++ +++ ins_cost(XFER_COST + BRANCH_COST * 2); +++ format %{ "far_float_b$cmp $op1, $op2, $lbl\t#@far_cmpF_branch"%} +++ +++ ins_encode %{ +++ __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), +++ *($lbl$$label), /* is_far */ true); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++%} +++ +++instruct far_cmpF_loop(cmpOp cmp, fRegF op1, 
fRegF op2, label lbl)
+++%{
+++ match(CountedLoopEnd cmp (CmpF op1 op2));
+++ effect(USE lbl);
+++
+++ ins_cost(XFER_COST + BRANCH_COST * 2);
+++ format %{ "far_float_b$cmp $op1, $op2, $lbl\t#@far_cmpF_loop"%}
+++
+++ ins_encode %{
+++ __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
+++ *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_class_compare);
+++%}
+++
+++// Double compare and branch instructions
+++instruct far_cmpD_branch(cmpOp cmp, fRegD op1, fRegD op2, label lbl)
+++%{
+++ match(If cmp (CmpD op1 op2));
+++ effect(USE lbl);
+++
+++ ins_cost(XFER_COST + BRANCH_COST * 2);
+++ format %{ "far_double_b$cmp $op1, $op2, $lbl\t#@far_cmpD_branch"%}
+++
+++ ins_encode %{
+++ __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg),
+++ as_FloatRegister($op2$$reg), *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_class_compare);
+++%}
+++
+++instruct far_cmpD_loop(cmpOp cmp, fRegD op1, fRegD op2, label lbl)
+++%{
+++ match(CountedLoopEnd cmp (CmpD op1 op2));
+++ effect(USE lbl);
+++
+++ ins_cost(XFER_COST + BRANCH_COST * 2);
+++ format %{ "far_double_b$cmp $op1, $op2, $lbl\t#@far_cmpD_loop"%}
+++
+++ ins_encode %{
+++ __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg),
+++ as_FloatRegister($op2$$reg), *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_class_compare);
+++%}
+++
+++instruct far_cmpI_reg_imm0_branch(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
+++%{
+++ match(If cmp (CmpI op1 zero));
+++
+++ effect(USE op1, USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++
+++ format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpI_reg_imm0_branch" %}
+++
+++ ins_encode %{
+++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_cmpz_branch);
+++%}
+++
+++instruct far_cmpI_reg_imm0_loop(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
+++%{
+++ match(CountedLoopEnd cmp (CmpI op1 zero));
+++
+++ effect(USE op1, USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++
+++ format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpI_reg_imm0_loop" %}
+++
+++ ins_encode %{
+++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_cmpz_branch);
+++%}
+++
+++instruct far_cmpUEqNeLeGt_imm0_branch(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl)
+++%{
+++ match(If cmp (CmpU op1 zero));
+++
+++ effect(USE op1, USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++
+++ format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpUEqNeLeGt_imm0_branch" %}
+++
+++ ins_encode %{
+++ __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_cmpz_branch);
+++%}
+++
+++instruct far_cmpUEqNeLeGt_reg_imm0_loop(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl)
+++%{
+++ match(CountedLoopEnd cmp (CmpU op1 zero));
+++
+++ effect(USE op1, USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++
+++ format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpUEqNeLeGt_reg_imm0_loop" %}
+++
+++
+++ ins_encode %{
+++ __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_cmpz_branch);
+++%}
+++
+++// compare lt/ge unsigned instructs have no short instruct with the same match
+++instruct far_cmpULtGe_reg_imm0_branch(cmpOpULtGe cmp, iRegI op1, immI0 zero, label lbl)
+++%{
+++ match(If cmp (CmpU op1 zero));
+++
+++ 
effect(USE op1, USE lbl);
+++
+++ ins_cost(BRANCH_COST);
+++
+++ format %{ "j $lbl if $cmp == ge\t#@far_cmpULtGe_reg_imm0_branch" %}
+++
+++ ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));
+++
+++ ins_pipe(pipe_cmpz_branch);
+++%}
+++
+++instruct far_cmpULtGe_reg_imm0_loop(cmpOpULtGe cmp, iRegI op1, immI0 zero, label lbl)
+++%{
+++ match(CountedLoopEnd cmp (CmpU op1 zero));
+++
+++ effect(USE op1, USE lbl);
+++
+++ ins_cost(BRANCH_COST);
+++
+++ format %{ "j $lbl if $cmp == ge\t#@far_cmpULtGe_reg_imm0_loop" %}
+++
+++ ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));
+++
+++ ins_pipe(pipe_cmpz_branch);
+++%}
+++
+++instruct far_cmpL_reg_imm0_branch(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
+++%{
+++ match(If cmp (CmpL op1 zero));
+++
+++ effect(USE op1, USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++
+++ format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpL_reg_imm0_branch" %}
+++
+++ ins_encode %{
+++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_cmpz_branch);
+++%}
+++
+++instruct far_cmpL_reg_imm0_loop(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
+++%{
+++ match(CountedLoopEnd cmp (CmpL op1 zero));
+++
+++ effect(USE op1, USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++
+++ format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpL_reg_imm0_loop" %}
+++
+++ ins_encode %{
+++ __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_cmpz_branch);
+++%}
+++
+++instruct far_cmpULEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl)
+++%{
+++ match(If cmp (CmpUL op1 zero));
+++
+++ effect(USE op1, USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++
+++ format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpULEqNeLeGt_reg_imm0_branch" %}
+++
+++ ins_encode %{
+++ __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_cmpz_branch);
+++%}
+++
+++instruct far_cmpULEqNeLeGt_reg_imm0_loop(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl)
+++%{
+++ match(CountedLoopEnd cmp (CmpUL op1 zero));
+++
+++ effect(USE op1, USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++
+++ format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpULEqNeLeGt_reg_imm0_loop" %}
+++
+++ ins_encode %{
+++ __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
+++ %}
+++
+++ ins_pipe(pipe_cmpz_branch);
+++%}
+++
+++// compare lt/ge unsigned instructs have no short instruct with the same match
+++instruct far_cmpULLtGe_reg_imm0_branch(cmpOpULtGe cmp, iRegL op1, immL0 zero, label lbl)
+++%{
+++ match(If cmp (CmpUL op1 zero));
+++
+++ effect(USE op1, USE lbl);
+++
+++ ins_cost(BRANCH_COST);
+++
+++ format %{ "j $lbl if $cmp == ge\t#@far_cmpULLtGe_reg_imm0_branch" %}
+++
+++ ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));
+++
+++ ins_pipe(pipe_cmpz_branch);
+++%}
+++
+++instruct far_cmpULLtGe_reg_imm0_loop(cmpOpULtGe cmp, iRegL op1, immL0 zero, label lbl)
+++%{
+++ match(CountedLoopEnd cmp (CmpUL op1 zero));
+++
+++ effect(USE op1, USE lbl);
+++
+++ ins_cost(BRANCH_COST);
+++
+++ format %{ "j $lbl if $cmp == ge\t#@far_cmpULLtGe_reg_imm0_loop" %}
+++
+++ ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));
+++
+++ ins_pipe(pipe_cmpz_branch);
+++%}
+++
+++instruct far_cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{
+++ match(If cmp (CmpP op1 zero));
+++ effect(USE lbl);
+++
+++ ins_cost(BRANCH_COST * 2);
+++ format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpP_imm0_branch" %} +++ +++ ins_encode %{ +++ __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++%} +++ +++instruct far_cmpP_imm0_loop(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{ +++ match(CountedLoopEnd cmp (CmpP op1 zero)); +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST * 2); +++ format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpP_imm0_loop" %} +++ +++ ins_encode %{ +++ __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++%} +++ +++instruct far_cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{ +++ match(If cmp (CmpN op1 zero)); +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST * 2); +++ +++ format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpN_imm0_branch" %} +++ +++ ins_encode %{ +++ __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++%} +++ +++instruct far_cmpN_imm0_loop(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{ +++ match(CountedLoopEnd cmp (CmpN op1 zero)); +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST * 2); +++ +++ format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpN_imm0_loop" %} +++ +++ ins_encode %{ +++ __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++%} +++ +++instruct far_cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{ +++ match(If cmp (CmpP (DecodeN op1) zero)); +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST * 2); +++ format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpP_narrowOop_imm0_branch" %} +++ +++ ins_encode %{ +++ __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++%} +++ +++instruct far_cmpP_narrowOop_imm0_loop(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{ +++ match(CountedLoopEnd cmp (CmpP (DecodeN op1) zero)); +++ effect(USE lbl); +++ +++ ins_cost(BRANCH_COST * 2); +++ format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpP_narrowOop_imm0_loop" %} +++ +++ ins_encode %{ +++ __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true); +++ %} +++ +++ ins_pipe(pipe_cmpz_branch); +++%} +++ +++// ============================================================================ +++// Conditional Move Instructions +++instruct cmovI_cmpI(iRegINoSp dst, iRegI src, iRegI op1, iRegI op2, cmpOp cop) %{ +++ match(Set dst (CMoveI (Binary cop (CmpI op1 op2)) (Binary dst src))); +++ ins_cost(ALU_COST + BRANCH_COST); +++ +++ format %{ +++ "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpI\n\t" +++ %} +++ +++ ins_encode %{ +++ __ enc_cmove($cop$$cmpcode, +++ as_Register($op1$$reg), as_Register($op2$$reg), +++ as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++%} +++ +++instruct cmovI_cmpU(iRegINoSp dst, iRegI src, iRegI op1, iRegI op2, cmpOpU cop) %{ +++ match(Set dst (CMoveI (Binary cop (CmpU op1 op2)) (Binary dst src))); +++ ins_cost(ALU_COST + BRANCH_COST); +++ +++ format %{ +++ "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpU\n\t" +++ %} +++ +++ ins_encode %{ +++ __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, +++ as_Register($op1$$reg), as_Register($op2$$reg), +++ as_Register($dst$$reg), 
as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++%} +++ +++instruct cmovI_cmpL(iRegINoSp dst, iRegI src, iRegL op1, iRegL op2, cmpOp cop) %{ +++ match(Set dst (CMoveI (Binary cop (CmpL op1 op2)) (Binary dst src))); +++ ins_cost(ALU_COST + BRANCH_COST); +++ +++ format %{ +++ "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpL\n\t" +++ %} +++ +++ ins_encode %{ +++ __ enc_cmove($cop$$cmpcode, +++ as_Register($op1$$reg), as_Register($op2$$reg), +++ as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++%} +++ +++instruct cmovI_cmpUL(iRegINoSp dst, iRegI src, iRegL op1, iRegL op2, cmpOpU cop) %{ +++ match(Set dst (CMoveI (Binary cop (CmpUL op1 op2)) (Binary dst src))); +++ ins_cost(ALU_COST + BRANCH_COST); +++ +++ format %{ +++ "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpUL\n\t" +++ %} +++ +++ ins_encode %{ +++ __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, +++ as_Register($op1$$reg), as_Register($op2$$reg), +++ as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++%} +++ +++instruct cmovL_cmpL(iRegLNoSp dst, iRegL src, iRegL op1, iRegL op2, cmpOp cop) %{ +++ match(Set dst (CMoveL (Binary cop (CmpL op1 op2)) (Binary dst src))); +++ ins_cost(ALU_COST + BRANCH_COST); +++ +++ format %{ +++ "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpL\n\t" +++ %} +++ +++ ins_encode %{ +++ __ enc_cmove($cop$$cmpcode, +++ as_Register($op1$$reg), as_Register($op2$$reg), +++ as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++%} +++ +++instruct cmovL_cmpUL(iRegLNoSp dst, iRegL src, iRegL op1, iRegL op2, cmpOpU cop) %{ +++ match(Set dst (CMoveL (Binary cop (CmpUL op1 op2)) (Binary dst src))); +++ ins_cost(ALU_COST + BRANCH_COST); +++ +++ format %{ +++ "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpUL\n\t" +++ %} +++ +++ ins_encode %{ +++ __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, +++ as_Register($op1$$reg), as_Register($op2$$reg), +++ as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++%} +++ +++instruct cmovL_cmpI(iRegLNoSp dst, iRegL src, iRegI op1, iRegI op2, cmpOp cop) %{ +++ match(Set dst (CMoveL (Binary cop (CmpI op1 op2)) (Binary dst src))); +++ ins_cost(ALU_COST + BRANCH_COST); +++ +++ format %{ +++ "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpI\n\t" +++ %} +++ +++ ins_encode %{ +++ __ enc_cmove($cop$$cmpcode, +++ as_Register($op1$$reg), as_Register($op2$$reg), +++ as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++%} +++ +++instruct cmovL_cmpU(iRegLNoSp dst, iRegL src, iRegI op1, iRegI op2, cmpOpU cop) %{ +++ match(Set dst (CMoveL (Binary cop (CmpU op1 op2)) (Binary dst src))); +++ ins_cost(ALU_COST + BRANCH_COST); +++ +++ format %{ +++ "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpU\n\t" +++ %} +++ +++ ins_encode %{ +++ __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, +++ as_Register($op1$$reg), as_Register($op2$$reg), +++ as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(pipe_class_compare); +++%} +++ +++// ============================================================================ +++// Procedure Call/Return Instructions +++ +++// Call Java Static Instruction +++// Note: If this code changes, the corresponding ret_addr_offset() and +++// compute_padding() functions will have to be adjusted. 
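+++// (Roughly: ret_addr_offset() reports where the return address falls
+++// within the emitted call sequence, and compute_padding() keeps the
+++// patchable call site suitably aligned; both hard-code the exact
+++// instruction layout produced here, which is why the note applies.)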
+++instruct CallStaticJavaDirect(method meth) +++%{ +++ match(CallStaticJava); +++ +++ effect(USE meth); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "CALL,static $meth\t#@CallStaticJavaDirect" %} +++ +++ ins_encode(riscv_enc_java_static_call(meth), +++ riscv_enc_call_epilog); +++ +++ ins_pipe(pipe_class_call); +++ ins_alignment(4); +++%} +++ +++// TO HERE +++ +++// Call Java Dynamic Instruction +++// Note: If this code changes, the corresponding ret_addr_offset() and +++// compute_padding() functions will have to be adjusted. +++instruct CallDynamicJavaDirect(method meth, rFlagsReg cr) +++%{ +++ match(CallDynamicJava); +++ +++ effect(USE meth, KILL cr); +++ +++ ins_cost(BRANCH_COST + ALU_COST * 6); +++ +++ format %{ "CALL,dynamic $meth\t#@CallDynamicJavaDirect" %} +++ +++ ins_encode(riscv_enc_java_dynamic_call(meth), +++ riscv_enc_call_epilog); +++ +++ ins_pipe(pipe_class_call); +++ ins_alignment(4); +++%} +++ +++// Call Runtime Instruction +++ +++instruct CallRuntimeDirect(method meth, rFlagsReg cr) +++%{ +++ match(CallRuntime); +++ +++ effect(USE meth, KILL cr); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "CALL, runtime $meth\t#@CallRuntimeDirect" %} +++ +++ ins_encode(riscv_enc_java_to_runtime(meth)); +++ +++ ins_pipe(pipe_class_call); +++%} +++ +++// Call Runtime Instruction +++ +++instruct CallLeafDirect(method meth, rFlagsReg cr) +++%{ +++ match(CallLeaf); +++ +++ effect(USE meth, KILL cr); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "CALL, runtime leaf $meth\t#@CallLeafDirect" %} +++ +++ ins_encode(riscv_enc_java_to_runtime(meth)); +++ +++ ins_pipe(pipe_class_call); +++%} +++ +++// Call Runtime Instruction +++ +++instruct CallLeafNoFPDirect(method meth, rFlagsReg cr) +++%{ +++ match(CallLeafNoFP); +++ +++ effect(USE meth, KILL cr); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "CALL, runtime leaf nofp $meth\t#@CallLeafNoFPDirect" %} +++ +++ ins_encode(riscv_enc_java_to_runtime(meth)); +++ +++ ins_pipe(pipe_class_call); +++%} +++ +++// ============================================================================ +++// Partial Subtype Check +++// +++// superklass array for an instance of the superklass. Set a hidden +++// internal cache on a hit (cache is checked with exposed code in +++// gen_subtype_check()). Return zero for a hit. The encoding +++// ALSO sets flags. 
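+++//
+++// As an illustration only (an assumed sketch of the stub's logic, not
+++// the literal code behind riscv_enc_partial_subtype_check):
+++//
+++//   if (sub->_secondary_super_cache == super) return hit;
+++//   for (Klass* k : *sub->_secondary_supers) {
+++//     if (k == super) { sub->_secondary_super_cache = super; return hit; }
+++//   }
+++//   return miss;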
+++ +++instruct partialSubtypeCheck(iRegP_R15 result, iRegP_R14 sub, iRegP_R10 super, iRegP_R12 tmp, rFlagsReg cr) +++%{ +++ match(Set result (PartialSubtypeCheck sub super)); +++ effect(KILL tmp, KILL cr); +++ +++ ins_cost(2 * STORE_COST + 3 * LOAD_COST + 4 * ALU_COST + BRANCH_COST * 4); +++ format %{ "partialSubtypeCheck $result, $sub, $super\t#@partialSubtypeCheck" %} +++ +++ ins_encode(riscv_enc_partial_subtype_check(sub, super, tmp, result)); +++ +++ opcode(0x1); // Force zero of result reg on hit +++ +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct partialSubtypeCheckVsZero(iRegP_R15 result, iRegP_R14 sub, iRegP_R10 super, iRegP_R12 tmp, +++ immP0 zero, rFlagsReg cr) +++%{ +++ match(Set cr (CmpP (PartialSubtypeCheck sub super) zero)); +++ effect(KILL tmp, KILL result); +++ +++ ins_cost(2 * STORE_COST + 3 * LOAD_COST + 4 * ALU_COST + BRANCH_COST * 4); +++ format %{ "partialSubtypeCheck $result, $sub, $super == 0\t#@partialSubtypeCheckVsZero" %} +++ +++ ins_encode(riscv_enc_partial_subtype_check(sub, super, tmp, result)); +++ +++ opcode(0x0); // Don't zero result reg on hit +++ +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct string_compareU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, +++ iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr) +++%{ +++ predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UU); +++ match(Set result (StrComp(Binary str1 cnt1)(Binary str2 cnt2))); +++ effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); +++ +++ format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareU" %} +++ ins_encode %{ +++ // Count is in 8-bit bytes; non-Compact chars are 16 bits. +++ __ string_compare($str1$$Register, $str2$$Register, +++ $cnt1$$Register, $cnt2$$Register, $result$$Register, +++ $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, +++ StrIntrinsicNode::UU); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct string_compareL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, +++ iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr) +++%{ +++ predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LL); +++ match(Set result (StrComp(Binary str1 cnt1)(Binary str2 cnt2))); +++ effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); +++ +++ format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareL" %} +++ ins_encode %{ +++ __ string_compare($str1$$Register, $str2$$Register, +++ $cnt1$$Register, $cnt2$$Register, $result$$Register, +++ $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, +++ StrIntrinsicNode::LL); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct string_compareUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, +++ iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr) +++%{ +++ predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UL); +++ match(Set result (StrComp(Binary str1 cnt1)(Binary str2 cnt2))); +++ effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); +++ +++ format %{"String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareUL" %} +++ ins_encode %{ +++ __ string_compare($str1$$Register, $str2$$Register, +++ $cnt1$$Register, $cnt2$$Register, $result$$Register, +++ $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, +++ 
StrIntrinsicNode::UL); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct string_compareLU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, +++ iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, +++ rFlagsReg cr) +++%{ +++ predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LU); +++ match(Set result (StrComp(Binary str1 cnt1)(Binary str2 cnt2))); +++ effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); +++ +++ format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareLU" %} +++ ins_encode %{ +++ __ string_compare($str1$$Register, $str2$$Register, +++ $cnt1$$Register, $cnt2$$Register, $result$$Register, +++ $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, +++ StrIntrinsicNode::LU); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct string_indexofUU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, +++ iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, +++ iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr) +++%{ +++ predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU); +++ match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); +++ effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result, +++ TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr); +++ +++ format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %} +++ ins_encode %{ +++ __ string_indexof($str1$$Register, $str2$$Register, +++ $cnt1$$Register, $cnt2$$Register, +++ $tmp1$$Register, $tmp2$$Register, +++ $tmp3$$Register, $tmp4$$Register, +++ $tmp5$$Register, $tmp6$$Register, +++ $result$$Register, StrIntrinsicNode::UU); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct string_indexofLL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, +++ iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, +++ iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr) +++%{ +++ predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL); +++ match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); +++ effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result, +++ TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr); +++ +++ format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %} +++ ins_encode %{ +++ __ string_indexof($str1$$Register, $str2$$Register, +++ $cnt1$$Register, $cnt2$$Register, +++ $tmp1$$Register, $tmp2$$Register, +++ $tmp3$$Register, $tmp4$$Register, +++ $tmp5$$Register, $tmp6$$Register, +++ $result$$Register, StrIntrinsicNode::LL); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct string_indexofUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, +++ iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, +++ iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr) +++%{ +++ predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL); +++ match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); +++ effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result, +++ TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr); +++ format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %} +++ +++ ins_encode %{ +++ __ string_indexof($str1$$Register, $str2$$Register, +++ $cnt1$$Register, $cnt2$$Register, +++ $tmp1$$Register, 
$tmp2$$Register, +++ $tmp3$$Register, $tmp4$$Register, +++ $tmp5$$Register, $tmp6$$Register, +++ $result$$Register, StrIntrinsicNode::UL); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct string_indexof_conUU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, +++ immI_le_4 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, +++ iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) +++%{ +++ predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU); +++ match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); +++ effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result, +++ TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); +++ +++ format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %} +++ +++ ins_encode %{ +++ int icnt2 = (int)$int_cnt2$$constant; +++ __ string_indexof_linearscan($str1$$Register, $str2$$Register, +++ $cnt1$$Register, zr, +++ $tmp1$$Register, $tmp2$$Register, +++ $tmp3$$Register, $tmp4$$Register, +++ icnt2, $result$$Register, StrIntrinsicNode::UU); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct string_indexof_conLL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, +++ immI_le_4 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, +++ iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) +++%{ +++ predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL); +++ match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); +++ effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result, +++ TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); +++ +++ format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %} +++ ins_encode %{ +++ int icnt2 = (int)$int_cnt2$$constant; +++ __ string_indexof_linearscan($str1$$Register, $str2$$Register, +++ $cnt1$$Register, zr, +++ $tmp1$$Register, $tmp2$$Register, +++ $tmp3$$Register, $tmp4$$Register, +++ icnt2, $result$$Register, StrIntrinsicNode::LL); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct string_indexof_conUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, +++ immI_1 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, +++ iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) +++%{ +++ predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL); +++ match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); +++ effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result, +++ TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); +++ +++ format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %} +++ ins_encode %{ +++ int icnt2 = (int)$int_cnt2$$constant; +++ __ string_indexof_linearscan($str1$$Register, $str2$$Register, +++ $cnt1$$Register, zr, +++ $tmp1$$Register, $tmp2$$Register, +++ $tmp3$$Register, $tmp4$$Register, +++ icnt2, $result$$Register, StrIntrinsicNode::UL); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct stringU_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch, +++ iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, +++ iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) +++%{ +++ match(Set result (StrIndexOfChar (Binary str1 cnt1) ch)); +++ predicate(!UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U)); +++ effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result, +++ TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); +++ +++ format %{ "StringUTF16 IndexOf char[] $str1, $cnt1, $ch -> $result" %} +++ ins_encode %{ +++ __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register, +++ 
$result$$Register, $tmp1$$Register, $tmp2$$Register,
+++ $tmp3$$Register, $tmp4$$Register, false /* isL */);
+++ %}
+++ ins_pipe(pipe_class_memory);
+++%}
+++
+++
+++instruct stringL_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch,
+++ iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
+++ iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
+++%{
+++ match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
+++ predicate(!UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
+++ effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result,
+++ TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
+++
+++ format %{ "StringLatin1 IndexOf char[] $str1, $cnt1, $ch -> $result" %}
+++ ins_encode %{
+++ __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
+++ $result$$Register, $tmp1$$Register, $tmp2$$Register,
+++ $tmp3$$Register, $tmp4$$Register, true /* isL */);
+++ %}
+++ ins_pipe(pipe_class_memory);
+++%}
+++
+++// clearing of an array
+++instruct clearArray_reg_reg(iRegL_R29 cnt, iRegP_R28 base, Universe dummy)
+++%{
+++ predicate(!UseRVV);
+++ match(Set dummy (ClearArray cnt base));
+++ effect(USE_KILL cnt, USE_KILL base);
+++
+++ ins_cost(4 * DEFAULT_COST);
+++ format %{ "ClearArray $cnt, $base\t#@clearArray_reg_reg" %}
+++
+++ ins_encode %{
+++ address tpc = __ zero_words($base$$Register, $cnt$$Register);
+++ if (tpc == NULL) {
+++ ciEnv::current()->record_failure("CodeCache is full");
+++ return;
+++ }
+++ %}
+++
+++ ins_pipe(pipe_class_memory);
+++%}
+++
+++instruct clearArray_imm_reg(immL cnt, iRegP_R28 base, Universe dummy, rFlagsReg cr)
+++%{
+++ predicate(!UseRVV && (uint64_t)n->in(2)->get_long()
+++ < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
+++ match(Set dummy (ClearArray cnt base));
+++ effect(USE_KILL base, KILL cr);
+++
+++ ins_cost(4 * DEFAULT_COST);
+++ format %{ "ClearArray $cnt, $base\t#@clearArray_imm_reg" %}
+++
+++ ins_encode %{
+++ __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
+++ %}
+++
+++ ins_pipe(pipe_class_memory);
+++%}
+++
+++instruct string_equalsL(iRegP_R11 str1, iRegP_R13 str2, iRegI_R14 cnt,
+++ iRegI_R10 result, rFlagsReg cr)
+++%{
+++ predicate(!UseRVV && ((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
+++ match(Set result (StrEquals (Binary str1 str2) cnt));
+++ effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);
+++
+++ format %{ "String Equals $str1, $str2, $cnt -> $result\t#@string_equalsL" %}
+++ ins_encode %{
+++ // Count is in 8-bit bytes; non-Compact chars are 16 bits.
+++ __ string_equals($str1$$Register, $str2$$Register,
+++ $result$$Register, $cnt$$Register, 1);
+++ %}
+++ ins_pipe(pipe_class_memory);
+++%}
+++
+++instruct string_equalsU(iRegP_R11 str1, iRegP_R13 str2, iRegI_R14 cnt,
+++ iRegI_R10 result, rFlagsReg cr)
+++%{
+++ predicate(!UseRVV && ((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
+++ match(Set result (StrEquals (Binary str1 str2) cnt));
+++ effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);
+++
+++ format %{ "String Equals $str1, $str2, $cnt -> $result\t#@string_equalsU" %}
+++ ins_encode %{
+++ // Count is in 8-bit bytes; non-Compact chars are 16 bits.
+++ __ string_equals($str1$$Register, $str2$$Register, +++ $result$$Register, $cnt$$Register, 2); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct array_equalsB(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result, +++ iRegP_R13 tmp1, iRegP_R14 tmp2, iRegP_R15 tmp3, +++ iRegP_R16 tmp4, iRegP_R28 tmp5, rFlagsReg cr) +++%{ +++ predicate(!UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL); +++ match(Set result (AryEq ary1 ary2)); +++ effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL tmp5, KILL cr); +++ +++ format %{ "Array Equals $ary1, ary2 -> $result\t#@array_equalsB // KILL $tmp5" %} +++ ins_encode %{ +++ __ arrays_equals($ary1$$Register, $ary2$$Register, +++ $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, +++ $result$$Register, $tmp5$$Register, 1); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct array_equalsC(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result, +++ iRegP_R13 tmp1, iRegP_R14 tmp2, iRegP_R15 tmp3, +++ iRegP_R16 tmp4, iRegP_R28 tmp5, rFlagsReg cr) +++%{ +++ predicate(!UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU); +++ match(Set result (AryEq ary1 ary2)); +++ effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL tmp5, KILL cr); +++ +++ format %{ "Array Equals $ary1, ary2 -> $result\t#@array_equalsC // KILL $tmp5" %} +++ ins_encode %{ +++ __ arrays_equals($ary1$$Register, $ary2$$Register, +++ $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, +++ $result$$Register, $tmp5$$Register, 2); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++// ============================================================================ +++// Safepoint Instructions +++ +++instruct safePoint(iRegP poll) +++%{ +++ match(SafePoint poll); +++ +++ ins_cost(2 * LOAD_COST); +++ format %{ +++ "lwu zr, [$poll]\t# Safepoint: poll for GC, #@safePoint" +++ %} +++ ins_encode %{ +++ __ read_polling_page(as_Register($poll$$reg), 0, relocInfo::poll_type); +++ %} +++ ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem); +++%} +++ +++// ============================================================================ +++// This name is KNOWN by the ADLC and cannot be changed. +++// The ADLC forces a 'TypeRawPtr::BOTTOM' output type +++// for this guy. +++instruct tlsLoadP(javaThread_RegP dst) +++%{ +++ match(Set dst (ThreadLocal)); +++ +++ ins_cost(0); +++ +++ format %{ " -- \t// $dst=Thread::current(), empty, #@tlsLoadP" %} +++ +++ size(0); +++ +++ ins_encode( /*empty*/ ); +++ +++ ins_pipe(pipe_class_empty); +++%} +++ +++// inlined locking and unlocking +++// using t1 as the 'flag' register to bridge the BoolNode producers and consumers +++instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2) +++%{ +++ match(Set cr (FastLock object box)); +++ effect(TEMP tmp1, TEMP tmp2); +++ +++ ins_cost(LOAD_COST * 2 + STORE_COST * 3 + ALU_COST * 6 + BRANCH_COST * 3); +++ format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2, #@cmpFastLock" %} +++ +++ ins_encode(riscv_enc_fast_lock(object, box, tmp1, tmp2)); +++ +++ ins_pipe(pipe_serial); +++%} +++ +++// using t1 as the 'flag' register to bridge the BoolNode producers and consumers +++instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2) +++%{ +++ match(Set cr (FastUnlock object box)); +++ effect(TEMP tmp1, TEMP tmp2); +++ +++ ins_cost(LOAD_COST * 2 + STORE_COST + ALU_COST * 2 + BRANCH_COST * 4); +++ format %{ "fastunlock $object,$box\t! 
kills $tmp1, $tmp2, #@cmpFastUnlock" %} +++ +++ ins_encode(riscv_enc_fast_unlock(object, box, tmp1, tmp2)); +++ +++ ins_pipe(pipe_serial); +++%} +++ +++// Tail Call; Jump from runtime stub to Java code. +++// Also known as an 'interprocedural jump'. +++// Target of jump will eventually return to caller. +++// TailJump below removes the return address. +++instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop) +++%{ +++ match(TailCall jump_target method_oop); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "jalr $jump_target\t# $method_oop holds method oop, #@TailCalljmpInd." %} +++ +++ ins_encode(riscv_enc_tail_call(jump_target)); +++ +++ ins_pipe(pipe_class_call); +++%} +++ +++instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R10 ex_oop) +++%{ +++ match(TailJump jump_target ex_oop); +++ +++ ins_cost(ALU_COST + BRANCH_COST); +++ +++ format %{ "jalr $jump_target\t# $ex_oop holds exception oop, #@TailjmpInd." %} +++ +++ ins_encode(riscv_enc_tail_jmp(jump_target)); +++ +++ ins_pipe(pipe_class_call); +++%} +++ +++// Create exception oop: created by stack-crawling runtime code. +++// Created exception is now available to this handler, and is setup +++// just prior to jumping to this handler. No code emitted. +++instruct CreateException(iRegP_R10 ex_oop) +++%{ +++ match(Set ex_oop (CreateEx)); +++ +++ ins_cost(0); +++ format %{ " -- \t// exception oop; no code emitted, #@CreateException" %} +++ +++ size(0); +++ +++ ins_encode( /*empty*/ ); +++ +++ ins_pipe(pipe_class_empty); +++%} +++ +++// Rethrow exception: The exception oop will come in the first +++// argument position. Then JUMP (not call) to the rethrow stub code. +++instruct RethrowException() +++%{ +++ match(Rethrow); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "j rethrow_stub\t#@RethrowException" %} +++ +++ ins_encode(riscv_enc_rethrow()); +++ +++ ins_pipe(pipe_class_call); +++%} +++ +++// Return Instruction +++// epilog node loads ret address into ra as part of frame pop +++instruct Ret() +++%{ +++ match(Return); +++ +++ ins_cost(BRANCH_COST); +++ format %{ "ret\t// return register, #@Ret" %} +++ +++ ins_encode(riscv_enc_ret()); +++ +++ ins_pipe(pipe_branch); +++%} +++ +++// Die now. +++instruct ShouldNotReachHere() %{ +++ match(Halt); +++ +++ ins_cost(BRANCH_COST); +++ +++ format %{ "#@ShouldNotReachHere" %} +++ +++ ins_encode %{ +++ if (is_reachable()) { +++ __ stop(_halt_reason); +++ } +++ %} +++ +++ ins_pipe(pipe_class_default); +++%} +++ +++ +++//----------PEEPHOLE RULES----------------------------------------------------- +++// These must follow all instruction definitions as they use the names +++// defined in the instructions definitions. +++// +++// peepmatch ( root_instr_name [preceding_instruction]* ); +++// +++// peepconstraint %{ +++// (instruction_number.operand_name relational_op instruction_number.operand_name +++// [, ...] ); +++// // instruction numbers are zero-based using left to right order in peepmatch +++// +++// peepreplace ( instr_name ( [instruction_number.operand_name]* ) ); +++// // provide an instruction_number.operand_name for each operand that appears +++// // in the replacement instruction's match rule +++// +++// ---------VM FLAGS--------------------------------------------------------- +++// +++// All peephole optimizations can be turned off using -XX:-OptoPeephole +++// +++// Each peephole rule is given an identifying number starting with zero and +++// increasing by one in the order seen by the parser. 
An individual peephole +++// can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=# +++// on the command-line. +++// +++// ---------CURRENT LIMITATIONS---------------------------------------------- +++// +++// Only match adjacent instructions in same basic block +++// Only equality constraints +++// Only constraints between operands, not (0.dest_reg == RAX_enc) +++// Only one replacement instruction +++// +++//----------SMARTSPILL RULES--------------------------------------------------- +++// These must follow all instruction definitions as they use the names +++// defined in the instructions definitions. +++ +++// Local Variables: +++// mode: c++ +++// End: ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/riscv_b.ad ++@@ -0,0 +1,575 @@ +++// +++// Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. +++// Copyright (c) 2022, Huawei Technologies Co., Ltd. All rights reserved. +++// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++// +++// This code is free software; you can redistribute it and/or modify it +++// under the terms of the GNU General Public License version 2 only, as +++// published by the Free Software Foundation. +++// +++// This code is distributed in the hope that it will be useful, but WITHOUT +++// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++// version 2 for more details (a copy is included in the LICENSE file that +++// accompanied this code). +++// +++// You should have received a copy of the GNU General Public License version +++// 2 along with this work; if not, write to the Free Software Foundation, +++// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++// +++// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++// or visit www.oracle.com if you need additional information or have any +++// questions. 
+++// +++// +++ +++// RISCV Bit-Manipulation Extension Architecture Description File +++ +++instruct rorI_imm_b(iRegINoSp dst, iRegI src, immI shift) %{ +++ predicate(UseZbb); +++ match(Set dst (RotateRight src shift)); +++ +++ format %{ "roriw $dst, $src, ($shift & 0x1f)\t#@rorI_imm_b" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ roriw(as_Register($dst$$reg), as_Register($src$$reg), $shift$$constant & 0x1f); +++ %} +++ +++ ins_pipe(ialu_reg_shift); +++%} +++ +++instruct rorL_imm_b(iRegLNoSp dst, iRegL src, immI shift) %{ +++ predicate(UseZbb); +++ match(Set dst (RotateRight src shift)); +++ +++ format %{ "rori $dst, $src, ($shift & 0x3f)\t#@rorL_imm_b" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ rori(as_Register($dst$$reg), as_Register($src$$reg), $shift$$constant & 0x3f); +++ %} +++ +++ ins_pipe(ialu_reg_shift); +++%} +++ +++instruct rorI_reg_b(iRegINoSp dst, iRegI src, iRegI shift) %{ +++ predicate(UseZbb); +++ match(Set dst (RotateRight src shift)); +++ +++ format %{ "rorw $dst, $src, $shift\t#@rorI_reg_b" %} +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ rorw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg)); +++ %} +++ ins_pipe(ialu_reg_reg); +++%} +++ +++instruct rorL_reg_b(iRegLNoSp dst, iRegL src, iRegI shift) %{ +++ predicate(UseZbb); +++ match(Set dst (RotateRight src shift)); +++ +++ format %{ "ror $dst, $src, $shift\t#@rorL_reg_b" %} +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ ror(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg)); +++ %} +++ ins_pipe(ialu_reg_reg); +++%} +++ +++instruct rolI_reg_b(iRegINoSp dst, iRegI src, iRegI shift) %{ +++ predicate(UseZbb); +++ match(Set dst (RotateLeft src shift)); +++ +++ format %{ "rolw $dst, $src, $shift\t#@rolI_reg_b" %} +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ rolw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg)); +++ %} +++ ins_pipe(ialu_reg_reg); +++%} +++ +++instruct rolL_reg_b(iRegLNoSp dst, iRegL src, iRegI shift) %{ +++ predicate(UseZbb); +++ match(Set dst (RotateLeft src shift)); +++ +++ format %{ "rol $dst, $src, $shift\t#@rolL_reg_b" %} +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ rol(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg)); +++ %} +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// Convert oop into int for vectors alignment masking +++instruct convP2I_b(iRegINoSp dst, iRegP src) %{ +++ predicate(UseZba); +++ match(Set dst (ConvL2I (CastP2X src))); +++ +++ format %{ "zext.w $dst, $src\t# ptr -> int @convP2I_b" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ zext_w(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// byte to int +++instruct convB2I_reg_reg_b(iRegINoSp dst, iRegIorL2I src, immI_24 lshift, immI_24 rshift) %{ +++ predicate(UseZbb); +++ match(Set dst (RShiftI (LShiftI src lshift) rshift)); +++ +++ format %{ "sext.b $dst, $src\t# b2i, #@convB2I_reg_reg_b" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ sext_b(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// int to short +++instruct convI2S_reg_reg_b(iRegINoSp dst, iRegIorL2I src, immI_16 lshift, immI_16 rshift) %{ +++ predicate(UseZbb); +++ match(Set dst (RShiftI (LShiftI src lshift) rshift)); +++ +++ format %{ "sext.h $dst, $src\t# i2s, #@convI2S_reg_reg_b" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ sext_h(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ 
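The two conversion rules above recognize the classic shift-pair sign-extension idiom, (x << 24) >> 24 for bytes and (x << 16) >> 16 for shorts, and collapse it into a single Zbb sext.b/sext.h. A minimal C++ sketch of the idiom being matched (for illustration only, not part of the patch; assumes two's-complement signed shifts, which are only guaranteed since C++20):

    #include <cassert>
    #include <cstdint>

    // (x << 24) >> 24 is the shape convB2I_reg_reg_b matches as
    // (RShiftI (LShiftI src 24) 24) and lowers to a single sext.b.
    static int32_t sign_extend_byte(int32_t x) {
      return (x << 24) >> 24;
    }

    // (x << 16) >> 16 is the shape convI2S_reg_reg_b lowers to sext.h.
    static int32_t sign_extend_short(int32_t x) {
      return (x << 16) >> 16;
    }

    int main() {
      assert(sign_extend_byte(0x80) == -128);      // bit 7 becomes the sign bit
      assert(sign_extend_short(0x8000) == -32768); // bit 15 becomes the sign bit
      return 0;
    }
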
+++// short to unsigned int +++instruct convS2UI_reg_reg_b(iRegINoSp dst, iRegIorL2I src, immI_16bits mask) %{ +++ predicate(UseZbb); +++ match(Set dst (AndI src mask)); +++ +++ format %{ "zext.h $dst, $src\t# s2ui, #@convS2UI_reg_reg_b" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ zext_h(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// int to unsigned long (zero extend) +++instruct convI2UL_reg_reg_b(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask) %{ +++ predicate(UseZba); +++ match(Set dst (AndL (ConvI2L src) mask)); +++ +++ format %{ "zext.w $dst, $src\t# i2ul, #@convI2UL_reg_reg_b" %} +++ +++ ins_cost(ALU_COST); +++ ins_encode %{ +++ __ zext_w(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_shift); +++%} +++ +++// BSWAP instructions +++instruct bytes_reverse_int_b(iRegINoSp dst, iRegIorL2I src) %{ +++ predicate(UseZbb); +++ match(Set dst (ReverseBytesI src)); +++ +++ ins_cost(ALU_COST * 2); +++ format %{ "revb_w_w $dst, $src\t#@bytes_reverse_int_b" %} +++ +++ ins_encode %{ +++ __ revb_w_w(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++instruct bytes_reverse_long_b(iRegLNoSp dst, iRegL src) %{ +++ predicate(UseZbb); +++ match(Set dst (ReverseBytesL src)); +++ +++ ins_cost(ALU_COST); +++ format %{ "rev8 $dst, $src\t#@bytes_reverse_long_b" %} +++ +++ ins_encode %{ +++ __ rev8(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++instruct bytes_reverse_unsigned_short_b(iRegINoSp dst, iRegIorL2I src) %{ +++ predicate(UseZbb); +++ match(Set dst (ReverseBytesUS src)); +++ +++ ins_cost(ALU_COST * 2); +++ format %{ "revb_h_h_u $dst, $src\t#@bytes_reverse_unsigned_short_b" %} +++ +++ ins_encode %{ +++ __ revb_h_h_u(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++instruct bytes_reverse_short_b(iRegINoSp dst, iRegIorL2I src) %{ +++ predicate(UseZbb); +++ match(Set dst (ReverseBytesS src)); +++ +++ ins_cost(ALU_COST * 2); +++ format %{ "revb_h_h $dst, $src\t#@bytes_reverse_short_b" %} +++ +++ ins_encode %{ +++ __ revb_h_h(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// Shift Add Pointer +++instruct shaddP_reg_reg_b(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale imm) %{ +++ predicate(UseZba); +++ match(Set dst (AddP src1 (LShiftL src2 imm))); +++ +++ ins_cost(ALU_COST); +++ format %{ "shadd $dst, $src2, $src1, $imm\t# ptr, #@shaddP_reg_reg_b" %} +++ +++ ins_encode %{ +++ __ shadd(as_Register($dst$$reg), +++ as_Register($src2$$reg), +++ as_Register($src1$$reg), +++ t0, +++ $imm$$constant); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++instruct shaddP_reg_reg_ext_b(iRegPNoSp dst, iRegP src1, iRegI src2, immIScale imm) %{ +++ predicate(UseZba); +++ match(Set dst (AddP src1 (LShiftL (ConvI2L src2) imm))); +++ +++ ins_cost(ALU_COST); +++ format %{ "shadd $dst, $src2, $src1, $imm\t# ptr, #@shaddP_reg_reg_ext_b" %} +++ +++ ins_encode %{ +++ __ shadd(as_Register($dst$$reg), +++ as_Register($src2$$reg), +++ as_Register($src1$$reg), +++ t0, +++ $imm$$constant); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// Shift Add Long +++instruct shaddL_reg_reg_b(iRegLNoSp dst, iRegL src1, iRegL src2, immIScale imm) %{ +++ predicate(UseZba); +++ match(Set dst (AddL src1 (LShiftL src2 imm))); +++ +++ ins_cost(ALU_COST); +++ format %{ "shadd $dst, $src2, $src1, $imm\t#@shaddL_reg_reg_b" %} +++ +++ ins_encode %{ +++ __ 
shadd(as_Register($dst$$reg), +++ as_Register($src2$$reg), +++ as_Register($src1$$reg), +++ t0, +++ $imm$$constant); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++instruct shaddL_reg_reg_ext_b(iRegLNoSp dst, iRegL src1, iRegI src2, immIScale imm) %{ +++ predicate(UseZba); +++ match(Set dst (AddL src1 (LShiftL (ConvI2L src2) imm))); +++ +++ ins_cost(ALU_COST); +++ format %{ "shadd $dst, $src2, $src1, $imm\t#@shaddL_reg_reg_ext_b" %} +++ +++ ins_encode %{ +++ __ shadd(as_Register($dst$$reg), +++ as_Register($src2$$reg), +++ as_Register($src1$$reg), +++ t0, +++ $imm$$constant); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// Zeros Count instructions +++instruct countLeadingZerosI_b(iRegINoSp dst, iRegIorL2I src) %{ +++ predicate(UseZbb); +++ match(Set dst (CountLeadingZerosI src)); +++ +++ ins_cost(ALU_COST); +++ format %{ "clzw $dst, $src\t#@countLeadingZerosI_b" %} +++ +++ ins_encode %{ +++ __ clzw(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++instruct countLeadingZerosL_b(iRegINoSp dst, iRegL src) %{ +++ predicate(UseZbb); +++ match(Set dst (CountLeadingZerosL src)); +++ +++ ins_cost(ALU_COST); +++ format %{ "clz $dst, $src\t#@countLeadingZerosL_b" %} +++ +++ ins_encode %{ +++ __ clz(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++instruct countTrailingZerosI_b(iRegINoSp dst, iRegIorL2I src) %{ +++ predicate(UseZbb); +++ match(Set dst (CountTrailingZerosI src)); +++ +++ ins_cost(ALU_COST); +++ format %{ "ctzw $dst, $src\t#@countTrailingZerosI_b" %} +++ +++ ins_encode %{ +++ __ ctzw(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++instruct countTrailingZerosL_b(iRegINoSp dst, iRegL src) %{ +++ predicate(UseZbb); +++ match(Set dst (CountTrailingZerosL src)); +++ +++ ins_cost(ALU_COST); +++ format %{ "ctz $dst, $src\t#@countTrailingZerosL_b" %} +++ +++ ins_encode %{ +++ __ ctz(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// Population Count instructions +++instruct popCountI_b(iRegINoSp dst, iRegIorL2I src) %{ +++ predicate(UsePopCountInstruction); +++ match(Set dst (PopCountI src)); +++ +++ ins_cost(ALU_COST); +++ format %{ "cpopw $dst, $src\t#@popCountI_b" %} +++ +++ ins_encode %{ +++ __ cpopw(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// Note: Long/bitCount(long) returns an int. 
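In other words, PopCountL consumes a 64-bit input but produces a 32-bit result (the count is at most 64), which is why the rule below pairs an iRegL source with an iRegINoSp destination, mirroring Java's Long.bitCount(long). A scalar C++ sketch of those semantics (illustration only, not part of the patch):

    #include <cassert>
    #include <cstdint>

    // Mirrors Java's Long.bitCount(long): 64-bit input, int result,
    // the same iRegL-source / iRegINoSp-destination shape as popCountL_b.
    static int32_t popcount64(uint64_t x) {
      int32_t n = 0;
      for (; x != 0; x &= x - 1) {  // Kernighan: clear the lowest set bit
        n++;
      }
      return n;
    }

    int main() {
      assert(popcount64(0) == 0);
      assert(popcount64(0xffffffffffffffffULL) == 64);  // max count still fits an int
      return 0;
    }
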
+++instruct popCountL_b(iRegINoSp dst, iRegL src) %{ +++ predicate(UsePopCountInstruction); +++ match(Set dst (PopCountL src)); +++ +++ ins_cost(ALU_COST); +++ format %{ "cpop $dst, $src\t#@popCountL_b" %} +++ +++ ins_encode %{ +++ __ cpop(as_Register($dst$$reg), as_Register($src$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// Max and Min +++instruct minI_reg_reg_b(iRegINoSp dst, iRegI src1, iRegI src2) %{ +++ predicate(UseZbb); +++ match(Set dst (MinI src1 src2)); +++ +++ ins_cost(ALU_COST); +++ format %{ "min $dst, $src1, $src2\t#@minI_reg_reg_b" %} +++ +++ ins_encode %{ +++ __ min(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++instruct maxI_reg_reg_b(iRegINoSp dst, iRegI src1, iRegI src2) %{ +++ predicate(UseZbb); +++ match(Set dst (MaxI src1 src2)); +++ +++ ins_cost(ALU_COST); +++ format %{ "max $dst, $src1, $src2\t#@maxI_reg_reg_b" %} +++ +++ ins_encode %{ +++ __ max(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// special case for comparing with zero +++// n.b. this is selected in preference to the rule above because it +++// avoids loading constant 0 into a source register +++ +++instruct minI_reg_zero_b(iRegINoSp dst, iRegI src1, immI0 zero) %{ +++ predicate(UseZbb); +++ match(Set dst (MinI src1 zero)); +++ match(Set dst (MinI zero src1)); +++ +++ ins_cost(ALU_COST); +++ format %{ "min $dst, $src1, zr\t#@minI_reg_zero_b" %} +++ +++ ins_encode %{ +++ __ min(as_Register($dst$$reg), as_Register($src1$$reg), zr); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++instruct maxI_reg_zero_b(iRegINoSp dst, iRegI src1, immI0 zero) %{ +++ predicate(UseZbb); +++ match(Set dst (MaxI src1 zero)); +++ match(Set dst (MaxI zero src1)); +++ +++ ins_cost(ALU_COST); +++ format %{ "max $dst, $src1, zr\t#@maxI_reg_zero_b" %} +++ +++ ins_encode %{ +++ __ max(as_Register($dst$$reg), as_Register($src1$$reg), zr); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// Abs +++instruct absI_reg_b(iRegINoSp dst, iRegI src) %{ +++ predicate(UseZbb); +++ match(Set dst (AbsI src)); +++ +++ ins_cost(ALU_COST * 2); +++ format %{ +++ "negw t0, $src\n\t" +++ "max $dst, $src, t0\t#@absI_reg_b" +++ %} +++ +++ ins_encode %{ +++ __ negw(t0, as_Register($src$$reg)); +++ __ max(as_Register($dst$$reg), as_Register($src$$reg), t0); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++instruct absL_reg_b(iRegLNoSp dst, iRegL src) %{ +++ predicate(UseZbb); +++ match(Set dst (AbsL src)); +++ +++ ins_cost(ALU_COST * 2); +++ format %{ +++ "neg t0, $src\n\t" +++ "max $dst, $src, t0\t#@absL_reg_b" +++ %} +++ +++ ins_encode %{ +++ __ neg(t0, as_Register($src$$reg)); +++ __ max(as_Register($dst$$reg), as_Register($src$$reg), t0); +++ %} +++ +++ ins_pipe(ialu_reg); +++%} +++ +++// And Not +++instruct andnI_reg_reg_b(iRegINoSp dst, iRegI src1, iRegI src2, immI_M1 m1) %{ +++ predicate(UseZbb); +++ match(Set dst (AndI src1 (XorI src2 m1))); +++ +++ ins_cost(ALU_COST); +++ format %{ "andn $dst, $src1, $src2\t#@andnI_reg_reg_b" %} +++ +++ ins_encode %{ +++ __ andn(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++instruct andnL_reg_reg_b(iRegLNoSp dst, iRegL src1, iRegL src2, immL_M1 m1) %{ +++ predicate(UseZbb); +++ match(Set dst (AndL src1 (XorL src2 m1))); +++ +++ ins_cost(ALU_COST); +++ format %{ "andn $dst, $src1, $src2\t#@andnL_reg_reg_b" %} +++ +++ ins_encode %{ +++ __ 
andn(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// Or Not +++instruct ornI_reg_reg_b(iRegINoSp dst, iRegI src1, iRegI src2, immI_M1 m1) %{ +++ predicate(UseZbb); +++ match(Set dst (OrI src1 (XorI src2 m1))); +++ +++ ins_cost(ALU_COST); +++ format %{ "orn $dst, $src1, $src2\t#@ornI_reg_reg_b" %} +++ +++ ins_encode %{ +++ __ orn(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++instruct ornL_reg_reg_b(iRegLNoSp dst, iRegL src1, iRegL src2, immL_M1 m1) %{ +++ predicate(UseZbb); +++ match(Set dst (OrL src1 (XorL src2 m1))); +++ +++ ins_cost(ALU_COST); +++ format %{ "orn $dst, $src1, $src2\t#@ornL_reg_reg_b" %} +++ +++ ins_encode %{ +++ __ orn(as_Register($dst$$reg), +++ as_Register($src1$$reg), +++ as_Register($src2$$reg)); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} +++ +++// AndI 0b0..010..0 + ConvI2B +++instruct convI2Bool_andI_reg_immIpowerOf2(iRegINoSp dst, iRegIorL2I src, immIpowerOf2 mask) %{ +++ predicate(UseZbs); +++ match(Set dst (Conv2B (AndI src mask))); +++ ins_cost(ALU_COST); +++ +++ format %{ "bexti $dst, $src, $mask\t#@convI2Bool_andI_reg_immIpowerOf2" %} +++ ins_encode %{ +++ __ bexti($dst$$Register, $src$$Register, exact_log2((juint)($mask$$constant))); +++ %} +++ +++ ins_pipe(ialu_reg_reg); +++%} ++\ No newline at end of file ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/riscv_v.ad ++@@ -0,0 +1,2038 @@ +++// +++// Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. +++// Copyright (c) 2020, Arm Limited. All rights reserved. +++// Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++// +++// This code is free software; you can redistribute it and/or modify it +++// under the terms of the GNU General Public License version 2 only, as +++// published by the Free Software Foundation. +++// +++// This code is distributed in the hope that it will be useful, but WITHOUT +++// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++// version 2 for more details (a copy is included in the LICENSE file that +++// accompanied this code). +++// +++// You should have received a copy of the GNU General Public License version +++// 2 along with this work; if not, write to the Free Software Foundation, +++// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++// +++// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++// or visit www.oracle.com if you need additional information or have any +++// questions. 
+++// +++// +++ +++// RISCV Vector Extension Architecture Description File +++ +++opclass vmemA(indirect); +++ +++source_hpp %{ +++ bool op_vec_supported(int opcode); +++%} +++ +++source %{ +++ static inline BasicType vector_element_basic_type(const MachNode* n) { +++ const TypeVect* vt = n->bottom_type()->is_vect(); +++ return vt->element_basic_type(); +++ } +++ +++ static inline BasicType vector_element_basic_type(const MachNode* use, const MachOper* opnd) { +++ int def_idx = use->operand_index(opnd); +++ Node* def = use->in(def_idx); +++ const TypeVect* vt = def->bottom_type()->is_vect(); +++ return vt->element_basic_type(); +++ } +++ +++ static void loadStore(C2_MacroAssembler masm, bool is_store, +++ VectorRegister reg, BasicType bt, Register base) { +++ Assembler::SEW sew = Assembler::elemtype_to_sew(bt); +++ masm.vsetvli(t0, x0, sew); +++ if (is_store) { +++ masm.vsex_v(reg, base, sew); +++ } else { +++ masm.vlex_v(reg, base, sew); +++ } +++ } +++ +++ bool op_vec_supported(int opcode) { +++ switch (opcode) { +++ // No multiply reduction instructions +++ case Op_MulReductionVD: +++ case Op_MulReductionVF: +++ case Op_MulReductionVI: +++ case Op_MulReductionVL: +++ // Others +++ case Op_Extract: +++ case Op_ExtractB: +++ case Op_ExtractC: +++ case Op_ExtractD: +++ case Op_ExtractF: +++ case Op_ExtractI: +++ case Op_ExtractL: +++ case Op_ExtractS: +++ case Op_ExtractUB: +++ // Vector API specific +++ case Op_AndReductionV: +++ case Op_OrReductionV: +++ case Op_XorReductionV: +++ case Op_LoadVectorGather: +++ case Op_StoreVectorScatter: +++ case Op_VectorBlend: +++ case Op_VectorCast: +++ case Op_VectorCastB2X: +++ case Op_VectorCastD2X: +++ case Op_VectorCastF2X: +++ case Op_VectorCastI2X: +++ case Op_VectorCastL2X: +++ case Op_VectorCastS2X: +++ case Op_VectorInsert: +++ case Op_VectorLoadConst: +++ case Op_VectorLoadMask: +++ case Op_VectorLoadShuffle: +++ case Op_VectorMaskCmp: +++ case Op_VectorRearrange: +++ case Op_VectorReinterpret: +++ case Op_VectorStoreMask: +++ case Op_VectorTest: +++ case Op_PopCountVI: +++ return false; +++ default: +++ return UseRVV; +++ } +++ } +++ +++%} +++ +++definitions %{ +++ int_def VEC_COST (200, 200); +++%} +++ +++// All VEC instructions +++ +++// vector load/store +++instruct loadV(vReg dst, vmemA mem) %{ +++ match(Set dst (LoadVector mem)); +++ ins_cost(VEC_COST); +++ format %{ "vle $dst, $mem\t#@loadV" %} +++ ins_encode %{ +++ VectorRegister dst_reg = as_VectorRegister($dst$$reg); +++ loadStore(C2_MacroAssembler(&cbuf), false, dst_reg, +++ vector_element_basic_type(this), as_Register($mem$$base)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct storeV(vReg src, vmemA mem) %{ +++ match(Set mem (StoreVector mem src)); +++ ins_cost(VEC_COST); +++ format %{ "vse $src, $mem\t#@storeV" %} +++ ins_encode %{ +++ VectorRegister src_reg = as_VectorRegister($src$$reg); +++ loadStore(C2_MacroAssembler(&cbuf), true, src_reg, +++ vector_element_basic_type(this, $src), as_Register($mem$$base)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector abs +++ +++instruct vabsB(vReg dst, vReg src, vReg tmp) %{ +++ match(Set dst (AbsVB src)); +++ ins_cost(VEC_COST); +++ effect(TEMP tmp); +++ format %{ "vrsub.vi $tmp, 0, $src\t#@vabsB\n\t" +++ "vmax.vv $dst, $tmp, $src" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e8); +++ __ vrsub_vi(as_VectorRegister($tmp$$reg), as_VectorRegister($src$$reg), 0); +++ __ vmax_vv(as_VectorRegister($dst$$reg), as_VectorRegister($tmp$$reg), as_VectorRegister($src$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ 
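The vrsub.vi/vmax.vv pair above is the branch-free identity abs(x) = max(x, -x) applied per lane; the same shape repeats below for 16-, 32- and 64-bit elements. A scalar C++ sketch of the trick (illustration only, not part of the patch; assumes two's-complement narrowing, guaranteed since C++20):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Scalar shape of the vrsub.vi/vmax.vv pair: vrsub.vi computes 0 - x
    // per lane, vmax.vv then keeps the non-negative operand.
    static int8_t abs_lane(int8_t x) {
      int8_t neg = static_cast<int8_t>(-x);  // 0 - x, wrapping in 8 bits
      return std::max(x, neg);
    }

    int main() {
      assert(abs_lane(-5) == 5);
      assert(abs_lane(7) == 7);
      assert(abs_lane(INT8_MIN) == INT8_MIN);  // -128 has no 8-bit positive counterpart
      return 0;
    }
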
+++instruct vabsS(vReg dst, vReg src, vReg tmp) %{ +++ match(Set dst (AbsVS src)); +++ ins_cost(VEC_COST); +++ effect(TEMP tmp); +++ format %{ "vrsub.vi $tmp, 0, $src\t#@vabsS\n\t" +++ "vmax.vv $dst, $tmp, $src" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e16); +++ __ vrsub_vi(as_VectorRegister($tmp$$reg), as_VectorRegister($src$$reg), 0); +++ __ vmax_vv(as_VectorRegister($dst$$reg), as_VectorRegister($tmp$$reg), as_VectorRegister($src$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vabsI(vReg dst, vReg src, vReg tmp) %{ +++ match(Set dst (AbsVI src)); +++ ins_cost(VEC_COST); +++ effect(TEMP tmp); +++ format %{ "vrsub.vi $tmp, 0, $src\t#@vabsI\n\t" +++ "vmax.vv $dst, $tmp, $src" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vrsub_vi(as_VectorRegister($tmp$$reg), as_VectorRegister($src$$reg), 0); +++ __ vmax_vv(as_VectorRegister($dst$$reg), as_VectorRegister($tmp$$reg), as_VectorRegister($src$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vabsL(vReg dst, vReg src, vReg tmp) %{ +++ match(Set dst (AbsVL src)); +++ ins_cost(VEC_COST); +++ effect(TEMP tmp); +++ format %{ "vrsub.vi $tmp, 0, $src\t#@vabsL\n\t" +++ "vmax.vv $dst, $tmp, $src" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vrsub_vi(as_VectorRegister($tmp$$reg), as_VectorRegister($src$$reg), 0); +++ __ vmax_vv(as_VectorRegister($dst$$reg), as_VectorRegister($tmp$$reg), as_VectorRegister($src$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vabsF(vReg dst, vReg src) %{ +++ match(Set dst (AbsVF src)); +++ ins_cost(VEC_COST); +++ format %{ "vfsgnjx.vv $dst, $src, $src, vm\t#@vabsF" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vfsgnjx_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), as_VectorRegister($src$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vabsD(vReg dst, vReg src) %{ +++ match(Set dst (AbsVD src)); +++ ins_cost(VEC_COST); +++ format %{ "vfsgnjx.vv $dst, $src, $src, vm\t#@vabsD" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vfsgnjx_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), as_VectorRegister($src$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector add +++ +++instruct vaddB(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (AddVB src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vadd.vv $dst, $src1, $src2\t#@vaddB" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e8); +++ __ vadd_vv(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vaddS(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (AddVS src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vadd.vv $dst, $src1, $src2\t#@vaddS" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e16); +++ __ vadd_vv(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vaddI(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (AddVI src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vadd.vv $dst, $src1, $src2\t#@vaddI" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vadd_vv(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vaddL(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (AddVL src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vadd.vv 
$dst, $src1, $src2\t#@vaddL" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vadd_vv(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vaddF(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (AddVF src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vfadd.vv $dst, $src1, $src2\t#@vaddF" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vfadd_vv(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vaddD(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (AddVD src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vfadd.vv $dst, $src1, $src2\t#@vaddD" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vfadd_vv(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector and +++ +++instruct vand(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (AndV src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vand.vv $dst, $src1, $src2\t#@vand" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vand_vv(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector or +++ +++instruct vor(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (OrV src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vor.vv $dst, $src1, $src2\t#@vor" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vor_vv(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector xor +++ +++instruct vxor(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (XorV src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vxor.vv $dst, $src1, $src2\t#@vxor" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vxor_vv(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector float div +++ +++instruct vdivF(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (DivVF src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vfdiv.vv $dst, $src1, $src2\t#@vdivF" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vfdiv_vv(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vdivD(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (DivVD src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vfdiv.vv $dst, $src1, $src2\t#@vdivD" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vfdiv_vv(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector integer max/min +++ +++instruct vmax(vReg dst, vReg src1, vReg src2) %{ +++ predicate(n->bottom_type()->is_vect()->element_basic_type() != T_FLOAT && +++ n->bottom_type()->is_vect()->element_basic_type() != T_DOUBLE); +++ match(Set dst (MaxV src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vmax.vv $dst, $src1, $src2\t#@vmax" %} +++ ins_encode %{ +++ BasicType bt = vector_element_basic_type(this); +++ Assembler::SEW sew = Assembler::elemtype_to_sew(bt); +++ __ vsetvli(t0, x0, sew); +++ __ 
vmax_vv(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vmin(vReg dst, vReg src1, vReg src2) %{ +++ predicate(n->bottom_type()->is_vect()->element_basic_type() != T_FLOAT && +++ n->bottom_type()->is_vect()->element_basic_type() != T_DOUBLE); +++ match(Set dst (MinV src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vmin.vv $dst, $src1, $src2\t#@vmin" %} +++ ins_encode %{ +++ BasicType bt = vector_element_basic_type(this); +++ Assembler::SEW sew = Assembler::elemtype_to_sew(bt); +++ __ vsetvli(t0, x0, sew); +++ __ vmin_vv(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector float-point max/min +++ +++instruct vmaxF(vReg dst, vReg src1, vReg src2) %{ +++ predicate(n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT); +++ match(Set dst (MaxV src1 src2)); +++ effect(TEMP_DEF dst); +++ ins_cost(VEC_COST); +++ format %{ "vmaxF $dst, $src1, $src2\t#@vmaxF" %} +++ ins_encode %{ +++ __ minmax_FD_v(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg), +++ false /* is_double */, false /* is_min */); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vmaxD(vReg dst, vReg src1, vReg src2) %{ +++ predicate(n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE); +++ match(Set dst (MaxV src1 src2)); +++ effect(TEMP_DEF dst); +++ ins_cost(VEC_COST); +++ format %{ "vmaxD $dst, $src1, $src2\t#@vmaxD" %} +++ ins_encode %{ +++ __ minmax_FD_v(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg), +++ true /* is_double */, false /* is_min */); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vminF(vReg dst, vReg src1, vReg src2) %{ +++ predicate(n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT); +++ match(Set dst (MinV src1 src2)); +++ effect(TEMP_DEF dst); +++ ins_cost(VEC_COST); +++ format %{ "vminF $dst, $src1, $src2\t#@vminF" %} +++ ins_encode %{ +++ __ minmax_FD_v(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg), +++ false /* is_double */, true /* is_min */); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vminD(vReg dst, vReg src1, vReg src2) %{ +++ predicate(n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE); +++ match(Set dst (MinV src1 src2)); +++ effect(TEMP_DEF dst); +++ ins_cost(VEC_COST); +++ format %{ "vminD $dst, $src1, $src2\t#@vminD" %} +++ ins_encode %{ +++ __ minmax_FD_v(as_VectorRegister($dst$$reg), +++ as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg), +++ true /* is_double */, true /* is_min */); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector fmla +++ +++// dst_src1 = dst_src1 + src2 * src3 +++instruct vfmlaF(vReg dst_src1, vReg src2, vReg src3) %{ +++ predicate(UseFMA); +++ match(Set dst_src1 (FmaVF dst_src1 (Binary src2 src3))); +++ ins_cost(VEC_COST); +++ format %{ "vfmacc.vv $dst_src1, $src2, $src3\t#@vfmlaF" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vfmacc_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// dst_src1 = dst_src1 + src2 * src3 +++instruct vfmlaD(vReg dst_src1, vReg src2, vReg src3) %{ +++ predicate(UseFMA); +++ match(Set dst_src1 (FmaVD dst_src1 (Binary src2 src3))); +++ ins_cost(VEC_COST); +++ format %{ "vfmacc.vv $dst_src1, $src2, $src3\t#@vfmlaD" 
%} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vfmacc_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector fmls +++ +++// dst_src1 = dst_src1 + -src2 * src3 +++// dst_src1 = dst_src1 + src2 * -src3 +++instruct vfmlsF(vReg dst_src1, vReg src2, vReg src3) %{ +++ predicate(UseFMA); +++ match(Set dst_src1 (FmaVF dst_src1 (Binary (NegVF src2) src3))); +++ match(Set dst_src1 (FmaVF dst_src1 (Binary src2 (NegVF src3)))); +++ ins_cost(VEC_COST); +++ format %{ "vfnmsac.vv $dst_src1, $src2, $src3\t#@vfmlsF" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vfnmsac_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// dst_src1 = dst_src1 + -src2 * src3 +++// dst_src1 = dst_src1 + src2 * -src3 +++instruct vfmlsD(vReg dst_src1, vReg src2, vReg src3) %{ +++ predicate(UseFMA); +++ match(Set dst_src1 (FmaVD dst_src1 (Binary (NegVD src2) src3))); +++ match(Set dst_src1 (FmaVD dst_src1 (Binary src2 (NegVD src3)))); +++ ins_cost(VEC_COST); +++ format %{ "vfnmsac.vv $dst_src1, $src2, $src3\t#@vfmlsD" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vfnmsac_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector fnmla +++ +++// dst_src1 = -dst_src1 + -src2 * src3 +++// dst_src1 = -dst_src1 + src2 * -src3 +++instruct vfnmlaF(vReg dst_src1, vReg src2, vReg src3) %{ +++ predicate(UseFMA); +++ match(Set dst_src1 (FmaVF (NegVF dst_src1) (Binary (NegVF src2) src3))); +++ match(Set dst_src1 (FmaVF (NegVF dst_src1) (Binary src2 (NegVF src3)))); +++ ins_cost(VEC_COST); +++ format %{ "vfnmacc.vv $dst_src1, $src2, $src3\t#@vfnmlaF" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vfnmacc_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// dst_src1 = -dst_src1 + -src2 * src3 +++// dst_src1 = -dst_src1 + src2 * -src3 +++instruct vfnmlaD(vReg dst_src1, vReg src2, vReg src3) %{ +++ predicate(UseFMA); +++ match(Set dst_src1 (FmaVD (NegVD dst_src1) (Binary (NegVD src2) src3))); +++ match(Set dst_src1 (FmaVD (NegVD dst_src1) (Binary src2 (NegVD src3)))); +++ ins_cost(VEC_COST); +++ format %{ "vfnmacc.vv $dst_src1, $src2, $src3\t#@vfnmlaD" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vfnmacc_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector fnmls +++ +++// dst_src1 = -dst_src1 + src2 * src3 +++instruct vfnmlsF(vReg dst_src1, vReg src2, vReg src3) %{ +++ predicate(UseFMA); +++ match(Set dst_src1 (FmaVF (NegVF dst_src1) (Binary src2 src3))); +++ ins_cost(VEC_COST); +++ format %{ "vfmsac.vv $dst_src1, $src2, $src3\t#@vfnmlsF" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vfmsac_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// dst_src1 = -dst_src1 + src2 * src3 +++instruct vfnmlsD(vReg dst_src1, vReg src2, vReg src3) %{ +++ predicate(UseFMA); +++ match(Set dst_src1 (FmaVD (NegVD dst_src1) (Binary src2 src3))); +++ ins_cost(VEC_COST); +++ format %{ "vfmsac.vv $dst_src1, $src2, $src3\t#@vfnmlsD" %} +++ 
ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vfmsac_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector mla +++ +++// dst_src1 = dst_src1 + src2 * src3 +++instruct vmlaB(vReg dst_src1, vReg src2, vReg src3) %{ +++ match(Set dst_src1 (AddVB dst_src1 (MulVB src2 src3))); +++ ins_cost(VEC_COST); +++ format %{ "vmacc.vv $dst_src1, src2, src3\t#@vmlaB" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e8); +++ __ vmacc_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// dst_src1 = dst_src1 + src2 * src3 +++instruct vmlaS(vReg dst_src1, vReg src2, vReg src3) %{ +++ match(Set dst_src1 (AddVS dst_src1 (MulVS src2 src3))); +++ ins_cost(VEC_COST); +++ format %{ "vmacc.vv $dst_src1, src2, src3\t#@vmlaS" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e16); +++ __ vmacc_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// dst_src1 = dst_src1 + src2 * src3 +++instruct vmlaI(vReg dst_src1, vReg src2, vReg src3) %{ +++ match(Set dst_src1 (AddVI dst_src1 (MulVI src2 src3))); +++ ins_cost(VEC_COST); +++ format %{ "vmacc.vv $dst_src1, src2, src3\t#@vmlaI" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vmacc_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// dst_src1 = dst_src1 + src2 * src3 +++instruct vmlaL(vReg dst_src1, vReg src2, vReg src3) %{ +++ match(Set dst_src1 (AddVL dst_src1 (MulVL src2 src3))); +++ ins_cost(VEC_COST); +++ format %{ "vmacc.vv $dst_src1, src2, src3\t#@vmlaL" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vmacc_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector mls +++ +++// dst_src1 = dst_src1 - src2 * src3 +++instruct vmlsB(vReg dst_src1, vReg src2, vReg src3) %{ +++ match(Set dst_src1 (SubVB dst_src1 (MulVB src2 src3))); +++ ins_cost(VEC_COST); +++ format %{ "vnmsac.vv $dst_src1, src2, src3\t#@vmlsB" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e8); +++ __ vnmsac_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// dst_src1 = dst_src1 - src2 * src3 +++instruct vmlsS(vReg dst_src1, vReg src2, vReg src3) %{ +++ match(Set dst_src1 (SubVS dst_src1 (MulVS src2 src3))); +++ ins_cost(VEC_COST); +++ format %{ "vnmsac.vv $dst_src1, src2, src3\t#@vmlsS" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e16); +++ __ vnmsac_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// dst_src1 = dst_src1 - src2 * src3 +++instruct vmlsI(vReg dst_src1, vReg src2, vReg src3) %{ +++ match(Set dst_src1 (SubVI dst_src1 (MulVI src2 src3))); +++ ins_cost(VEC_COST); +++ format %{ "vnmsac.vv $dst_src1, src2, src3\t#@vmlsI" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vnmsac_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// dst_src1 = dst_src1 - src2 * src3 +++instruct vmlsL(vReg dst_src1, vReg 
src2, vReg src3) %{ +++ match(Set dst_src1 (SubVL dst_src1 (MulVL src2 src3))); +++ ins_cost(VEC_COST); +++ format %{ "vnmsac.vv $dst_src1, src2, src3\t#@vmlsL" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vnmsac_vv(as_VectorRegister($dst_src1$$reg), +++ as_VectorRegister($src2$$reg), as_VectorRegister($src3$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector mul +++ +++instruct vmulB(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (MulVB src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vmul.vv $dst, $src1, $src2\t#@vmulB" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e8); +++ __ vmul_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vmulS(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (MulVS src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vmul.vv $dst, $src1, $src2\t#@vmulS" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e16); +++ __ vmul_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vmulI(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (MulVI src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vmul.vv $dst, $src1, $src2\t#@vmulI" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vmul_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vmulL(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (MulVL src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vmul.vv $dst, $src1, $src2\t#@vmulL" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vmul_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vmulF(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (MulVF src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vfmul.vv $dst, $src1, $src2\t#@vmulF" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vfmul_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vmulD(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (MulVD src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vfmul.vv $dst, $src1, $src2\t#@vmulD" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vfmul_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector fneg +++ +++instruct vnegF(vReg dst, vReg src) %{ +++ match(Set dst (NegVF src)); +++ ins_cost(VEC_COST); +++ format %{ "vfsgnjn.vv $dst, $src, $src\t#@vnegF" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vfneg_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vnegD(vReg dst, vReg src) %{ +++ match(Set dst (NegVD src)); +++ ins_cost(VEC_COST); +++ format %{ "vfsgnjn.vv $dst, $src, $src\t#@vnegD" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vfneg_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector add reduction +++ +++instruct reduce_addB(iRegINoSp dst, iRegIorL2I src1, vReg src2, vReg tmp) %{ +++ 
predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE); +++ match(Set dst (AddReductionVI src1 src2)); +++ effect(TEMP tmp); +++ ins_cost(VEC_COST); +++ format %{ "vmv.s.x $tmp, $src1\t#@reduce_addB\n\t" +++ "vredsum.vs $tmp, $src2, $tmp\n\t" +++ "vmv.x.s $dst, $tmp" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e8); +++ __ vmv_s_x(as_VectorRegister($tmp$$reg), $src1$$Register); +++ __ vredsum_vs(as_VectorRegister($tmp$$reg), as_VectorRegister($src2$$reg), +++ as_VectorRegister($tmp$$reg)); +++ __ vmv_x_s($dst$$Register, as_VectorRegister($tmp$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct reduce_addS(iRegINoSp dst, iRegIorL2I src1, vReg src2, vReg tmp) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT); +++ match(Set dst (AddReductionVI src1 src2)); +++ effect(TEMP tmp); +++ ins_cost(VEC_COST); +++ format %{ "vmv.s.x $tmp, $src1\t#@reduce_addS\n\t" +++ "vredsum.vs $tmp, $src2, $tmp\n\t" +++ "vmv.x.s $dst, $tmp" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e16); +++ __ vmv_s_x(as_VectorRegister($tmp$$reg), $src1$$Register); +++ __ vredsum_vs(as_VectorRegister($tmp$$reg), as_VectorRegister($src2$$reg), +++ as_VectorRegister($tmp$$reg)); +++ __ vmv_x_s($dst$$Register, as_VectorRegister($tmp$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct reduce_addI(iRegINoSp dst, iRegIorL2I src1, vReg src2, vReg tmp) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT); +++ match(Set dst (AddReductionVI src1 src2)); +++ effect(TEMP tmp); +++ ins_cost(VEC_COST); +++ format %{ "vmv.s.x $tmp, $src1\t#@reduce_addI\n\t" +++ "vredsum.vs $tmp, $src2, $tmp\n\t" +++ "vmv.x.s $dst, $tmp" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vmv_s_x(as_VectorRegister($tmp$$reg), $src1$$Register); +++ __ vredsum_vs(as_VectorRegister($tmp$$reg), as_VectorRegister($src2$$reg), +++ as_VectorRegister($tmp$$reg)); +++ __ vmv_x_s($dst$$Register, as_VectorRegister($tmp$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct reduce_addL(iRegLNoSp dst, iRegL src1, vReg src2, vReg tmp) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG); +++ match(Set dst (AddReductionVL src1 src2)); +++ effect(TEMP tmp); +++ ins_cost(VEC_COST); +++ format %{ "vmv.s.x $tmp, $src1\t#@reduce_addL\n\t" +++ "vredsum.vs $tmp, $src2, $tmp\n\t" +++ "vmv.x.s $dst, $tmp" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vmv_s_x(as_VectorRegister($tmp$$reg), $src1$$Register); +++ __ vredsum_vs(as_VectorRegister($tmp$$reg), as_VectorRegister($src2$$reg), +++ as_VectorRegister($tmp$$reg)); +++ __ vmv_x_s($dst$$Register, as_VectorRegister($tmp$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct reduce_addF(fRegF src1_dst, vReg src2, vReg tmp) %{ +++ match(Set src1_dst (AddReductionVF src1_dst src2)); +++ effect(TEMP tmp); +++ ins_cost(VEC_COST); +++ format %{ "vfmv.s.f $tmp, $src1_dst\t#@reduce_addF\n\t" +++ "vfredosum.vs $tmp, $src2, $tmp\n\t" +++ "vfmv.f.s $src1_dst, $tmp" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vfmv_s_f(as_VectorRegister($tmp$$reg), $src1_dst$$FloatRegister); +++ __ vfredosum_vs(as_VectorRegister($tmp$$reg), as_VectorRegister($src2$$reg), +++ as_VectorRegister($tmp$$reg)); +++ __ vfmv_f_s($src1_dst$$FloatRegister, as_VectorRegister($tmp$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct reduce_addD(fRegD src1_dst, vReg src2, vReg tmp) %{ +++ match(Set src1_dst (AddReductionVD src1_dst 
src2)); +++ effect(TEMP tmp); +++ ins_cost(VEC_COST); +++ format %{ "vfmv.s.f $tmp, $src1_dst\t#@reduce_addD\n\t" +++ "vfredosum.vs $tmp, $src2, $tmp\n\t" +++ "vfmv.f.s $src1_dst, $tmp" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vfmv_s_f(as_VectorRegister($tmp$$reg), $src1_dst$$FloatRegister); +++ __ vfredosum_vs(as_VectorRegister($tmp$$reg), as_VectorRegister($src2$$reg), +++ as_VectorRegister($tmp$$reg)); +++ __ vfmv_f_s($src1_dst$$FloatRegister, as_VectorRegister($tmp$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector integer max reduction +++instruct vreduce_maxB(iRegINoSp dst, iRegI src1, vReg src2, vReg tmp) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE); +++ match(Set dst (MaxReductionV src1 src2)); +++ ins_cost(VEC_COST); +++ effect(TEMP tmp); +++ format %{ "vreduce_maxB $dst, $src1, $src2, $tmp" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e8); +++ __ vredmax_vs(as_VectorRegister($tmp$$reg), as_VectorRegister($src2$$reg), as_VectorRegister($src2$$reg)); +++ __ vmv_x_s($dst$$Register, as_VectorRegister($tmp$$reg)); +++ Label Ldone; +++ __ ble(as_Register($src1$$reg), as_Register($dst$$reg), Ldone); +++ __ mv(as_Register($dst$$reg), as_Register($src1$$reg)); +++ __ bind(Ldone); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vreduce_maxS(iRegINoSp dst, iRegI src1, vReg src2, vReg tmp) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT); +++ match(Set dst (MaxReductionV src1 src2)); +++ ins_cost(VEC_COST); +++ effect(TEMP tmp); +++ format %{ "vreduce_maxS $dst, $src1, $src2, $tmp" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e16); +++ __ vredmax_vs(as_VectorRegister($tmp$$reg), as_VectorRegister($src2$$reg), as_VectorRegister($src2$$reg)); +++ __ vmv_x_s($dst$$Register, as_VectorRegister($tmp$$reg)); +++ Label Ldone; +++ __ ble(as_Register($src1$$reg), as_Register($dst$$reg), Ldone); +++ __ mv(as_Register($dst$$reg), as_Register($src1$$reg)); +++ __ bind(Ldone); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vreduce_maxI(iRegINoSp dst, iRegIorL2I src1, vReg src2, vReg tmp) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT); +++ match(Set dst (MaxReductionV src1 src2)); +++ ins_cost(VEC_COST); +++ effect(TEMP tmp); +++ format %{ "vreduce_maxI $dst, $src1, $src2, $tmp" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vmv_s_x(as_VectorRegister($tmp$$reg), $src1$$Register); +++ __ vredmax_vs(as_VectorRegister($tmp$$reg), as_VectorRegister($src2$$reg), as_VectorRegister($tmp$$reg)); +++ __ vmv_x_s($dst$$Register, as_VectorRegister($tmp$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vreduce_maxL(iRegLNoSp dst, iRegL src1, vReg src2, vReg tmp) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG); +++ match(Set dst (MaxReductionV src1 src2)); +++ ins_cost(VEC_COST); +++ effect(TEMP tmp); +++ format %{ "vreduce_maxL $dst, $src1, $src2, $tmp" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vmv_s_x(as_VectorRegister($tmp$$reg), $src1$$Register); +++ __ vredmax_vs(as_VectorRegister($tmp$$reg), as_VectorRegister($src2$$reg), as_VectorRegister($tmp$$reg)); +++ __ vmv_x_s($dst$$Register, as_VectorRegister($tmp$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector integer min reduction +++instruct vreduce_minB(iRegINoSp dst, iRegI src1, vReg src2, vReg tmp) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == 
T_BYTE); +++ match(Set dst (MinReductionV src1 src2)); +++ ins_cost(VEC_COST); +++ effect(TEMP tmp); +++ format %{ "vreduce_minB $dst, $src1, $src2, $tmp" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e8); +++ __ vredmin_vs(as_VectorRegister($tmp$$reg), as_VectorRegister($src2$$reg), as_VectorRegister($src2$$reg)); +++ __ vmv_x_s($dst$$Register, as_VectorRegister($tmp$$reg)); +++ Label Ldone; +++ __ bge(as_Register($src1$$reg), as_Register($dst$$reg), Ldone); +++ __ mv(as_Register($dst$$reg), as_Register($src1$$reg)); +++ __ bind(Ldone); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vreduce_minS(iRegINoSp dst, iRegI src1, vReg src2, vReg tmp) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT); +++ match(Set dst (MinReductionV src1 src2)); +++ ins_cost(VEC_COST); +++ effect(TEMP tmp); +++ format %{ "vreduce_minS $dst, $src1, $src2, $tmp" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e16); +++ __ vredmin_vs(as_VectorRegister($tmp$$reg), as_VectorRegister($src2$$reg), as_VectorRegister($src2$$reg)); +++ __ vmv_x_s($dst$$Register, as_VectorRegister($tmp$$reg)); +++ Label Ldone; +++ __ bge(as_Register($src1$$reg), as_Register($dst$$reg), Ldone); +++ __ mv(as_Register($dst$$reg), as_Register($src1$$reg)); +++ __ bind(Ldone); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vreduce_minI(iRegINoSp dst, iRegIorL2I src1, vReg src2, vReg tmp) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT); +++ match(Set dst (MinReductionV src1 src2)); +++ ins_cost(VEC_COST); +++ effect(TEMP tmp); +++ format %{ "vreduce_minI $dst, $src1, $src2, $tmp" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vmv_s_x(as_VectorRegister($tmp$$reg), $src1$$Register); +++ __ vredmin_vs(as_VectorRegister($tmp$$reg), as_VectorRegister($src2$$reg), as_VectorRegister($tmp$$reg)); +++ __ vmv_x_s($dst$$Register, as_VectorRegister($tmp$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vreduce_minL(iRegLNoSp dst, iRegL src1, vReg src2, vReg tmp) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG); +++ match(Set dst (MinReductionV src1 src2)); +++ ins_cost(VEC_COST); +++ effect(TEMP tmp); +++ format %{ "vreduce_minL $dst, $src1, $src2, $tmp" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vmv_s_x(as_VectorRegister($tmp$$reg), $src1$$Register); +++ __ vredmin_vs(as_VectorRegister($tmp$$reg), as_VectorRegister($src2$$reg), as_VectorRegister($tmp$$reg)); +++ __ vmv_x_s($dst$$Register, as_VectorRegister($tmp$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector float max reduction +++ +++instruct vreduce_maxF(fRegF dst, fRegF src1, vReg src2, vReg tmp1, vReg tmp2) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT); +++ match(Set dst (MaxReductionV src1 src2)); +++ ins_cost(VEC_COST); +++ effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2); +++ format %{ "reduce_maxF $dst, $src1, $src2, $tmp1, $tmp2" %} +++ ins_encode %{ +++ __ reduce_minmax_FD_v($dst$$FloatRegister, +++ $src1$$FloatRegister, as_VectorRegister($src2$$reg), +++ as_VectorRegister($tmp1$$reg), as_VectorRegister($tmp2$$reg), +++ false /* is_double */, false /* is_min */); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vreduce_maxD(fRegD dst, fRegD src1, vReg src2, vReg tmp1, vReg tmp2) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE); +++ match(Set dst (MaxReductionV src1 src2)); +++ ins_cost(VEC_COST); +++ 
effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2); +++ format %{ "reduce_maxD $dst, $src1, $src2, $tmp1, $tmp2" %} +++ ins_encode %{ +++ __ reduce_minmax_FD_v($dst$$FloatRegister, +++ $src1$$FloatRegister, as_VectorRegister($src2$$reg), +++ as_VectorRegister($tmp1$$reg), as_VectorRegister($tmp2$$reg), +++ true /* is_double */, false /* is_min */); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector float min reduction +++ +++instruct vreduce_minF(fRegF dst, fRegF src1, vReg src2, vReg tmp1, vReg tmp2) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT); +++ match(Set dst (MinReductionV src1 src2)); +++ ins_cost(VEC_COST); +++ effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2); +++ format %{ "reduce_minF $dst, $src1, $src2, $tmp1, $tmp2" %} +++ ins_encode %{ +++ __ reduce_minmax_FD_v($dst$$FloatRegister, +++ $src1$$FloatRegister, as_VectorRegister($src2$$reg), +++ as_VectorRegister($tmp1$$reg), as_VectorRegister($tmp2$$reg), +++ false /* is_double */, true /* is_min */); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vreduce_minD(fRegD dst, fRegD src1, vReg src2, vReg tmp1, vReg tmp2) %{ +++ predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE); +++ match(Set dst (MinReductionV src1 src2)); +++ ins_cost(VEC_COST); +++ effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2); +++ format %{ "reduce_minD $dst, $src1, $src2, $tmp1, $tmp2" %} +++ ins_encode %{ +++ __ reduce_minmax_FD_v($dst$$FloatRegister, +++ $src1$$FloatRegister, as_VectorRegister($src2$$reg), +++ as_VectorRegister($tmp1$$reg), as_VectorRegister($tmp2$$reg), +++ true /* is_double */, true /* is_min */); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector replicate +++ +++instruct replicateB(vReg dst, iRegIorL2I src) %{ +++ match(Set dst (ReplicateB src)); +++ ins_cost(VEC_COST); +++ format %{ "vmv.v.x $dst, $src\t#@replicateB" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e8); +++ __ vmv_v_x(as_VectorRegister($dst$$reg), as_Register($src$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct replicateS(vReg dst, iRegIorL2I src) %{ +++ match(Set dst (ReplicateS src)); +++ ins_cost(VEC_COST); +++ format %{ "vmv.v.x $dst, $src\t#@replicateS" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e16); +++ __ vmv_v_x(as_VectorRegister($dst$$reg), as_Register($src$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct replicateI(vReg dst, iRegIorL2I src) %{ +++ match(Set dst (ReplicateI src)); +++ ins_cost(VEC_COST); +++ format %{ "vmv.v.x $dst, $src\t#@replicateI" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vmv_v_x(as_VectorRegister($dst$$reg), as_Register($src$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct replicateL(vReg dst, iRegL src) %{ +++ match(Set dst (ReplicateL src)); +++ ins_cost(VEC_COST); +++ format %{ "vmv.v.x $dst, $src\t#@replicateL" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vmv_v_x(as_VectorRegister($dst$$reg), as_Register($src$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct replicateB_imm5(vReg dst, immI5 con) %{ +++ match(Set dst (ReplicateB con)); +++ ins_cost(VEC_COST); +++ format %{ "vmv.v.i $dst, $con\t#@replicateB_imm5" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e8); +++ __ vmv_v_i(as_VectorRegister($dst$$reg), $con$$constant); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct replicateS_imm5(vReg dst, immI5 con) %{ +++ match(Set dst (ReplicateS con)); +++ ins_cost(VEC_COST); +++ format %{ "vmv.v.i $dst, $con\t#@replicateS_imm5" %} +++ ins_encode 
%{ +++ __ vsetvli(t0, x0, Assembler::e16); +++ __ vmv_v_i(as_VectorRegister($dst$$reg), $con$$constant); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct replicateI_imm5(vReg dst, immI5 con) %{ +++ match(Set dst (ReplicateI con)); +++ ins_cost(VEC_COST); +++ format %{ "vmv.v.i $dst, $con\t#@replicateI_imm5" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vmv_v_i(as_VectorRegister($dst$$reg), $con$$constant); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct replicateL_imm5(vReg dst, immL5 con) %{ +++ match(Set dst (ReplicateL con)); +++ ins_cost(VEC_COST); +++ format %{ "vmv.v.i $dst, $con\t#@replicateL_imm5" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vmv_v_i(as_VectorRegister($dst$$reg), $con$$constant); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct replicateF(vReg dst, fRegF src) %{ +++ match(Set dst (ReplicateF src)); +++ ins_cost(VEC_COST); +++ format %{ "vfmv.v.f $dst, $src\t#@replicateF" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vfmv_v_f(as_VectorRegister($dst$$reg), $src$$FloatRegister); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct replicateD(vReg dst, fRegD src) %{ +++ match(Set dst (ReplicateD src)); +++ ins_cost(VEC_COST); +++ format %{ "vfmv.v.f $dst, $src\t#@replicateD" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vfmv_v_f(as_VectorRegister($dst$$reg), $src$$FloatRegister); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector shift +++ +++instruct vasrB(vReg dst, vReg src, vReg shift) %{ +++ match(Set dst (RShiftVB src shift)); +++ ins_cost(VEC_COST); +++ effect(TEMP_DEF dst); +++ format %{ "vmsgtu.vi v0, $shift 7\t#@vasrB\n\t" +++ "vsra.vi $dst, $src, 7, Assembler::v0_t\n\t" +++ "vmnot.m v0, v0\n\t" +++ "vsra.vv $dst, $src, $shift, Assembler::v0_t" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e8); +++ // if shift > BitsPerByte - 1, clear the low BitsPerByte - 1 bits +++ __ vmsgtu_vi(v0, as_VectorRegister($shift$$reg), BitsPerByte - 1); +++ __ vsra_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), +++ BitsPerByte - 1, Assembler::v0_t); +++ // otherwise, shift +++ __ vmnot_m(v0, v0); +++ __ vsra_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), +++ as_VectorRegister($shift$$reg), Assembler::v0_t); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vasrS(vReg dst, vReg src, vReg shift) %{ +++ match(Set dst (RShiftVS src shift)); +++ ins_cost(VEC_COST); +++ effect(TEMP_DEF dst); +++ format %{ "vmsgtu.vi v0, $shift, 15\t#@vasrS\n\t" +++ "vsra.vi $dst, $src, 15, Assembler::v0_t\n\t" +++ "vmnot.m v0, v0\n\t" +++ "vsra.vv $dst, $src, $shift, Assembler::v0_t" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e16); +++ // if shift > BitsPerShort - 1, clear the low BitsPerShort - 1 bits +++ __ vmsgtu_vi(v0, as_VectorRegister($shift$$reg), BitsPerShort - 1); +++ __ vsra_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), +++ BitsPerShort - 1, Assembler::v0_t); +++ // otherwise, shift +++ __ vmnot_m(v0, v0); +++ __ vsra_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), +++ as_VectorRegister($shift$$reg), Assembler::v0_t); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vasrI(vReg dst, vReg src, vReg shift) %{ +++ match(Set dst (RShiftVI src shift)); +++ ins_cost(VEC_COST); +++ format %{ "vsra.vv $dst, $src, $shift\t#@vasrI" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vsra_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), +++ as_VectorRegister($shift$$reg)); +++ 
%}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vasrL(vReg dst, vReg src, vReg shift) %{
+++ match(Set dst (RShiftVL src shift));
+++ ins_cost(VEC_COST);
+++ format %{ "vsra.vv $dst, $src, $shift\t#@vasrL" %}
+++ ins_encode %{
+++ __ vsetvli(t0, x0, Assembler::e64);
+++ __ vsra_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($shift$$reg));
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vlslB(vReg dst, vReg src, vReg shift) %{
+++ match(Set dst (LShiftVB src shift));
+++ ins_cost(VEC_COST);
+++ effect(TEMP_DEF dst);
+++ format %{ "vmsgtu.vi v0, $shift, 7\t#@vlslB\n\t"
+++ "vxor.vv $dst, $src, $src, Assembler::v0_t\n\t"
+++ "vmnot.m v0, v0\n\t"
+++ "vsll.vv $dst, $src, $shift, Assembler::v0_t" %}
+++ ins_encode %{
+++ __ vsetvli(t0, x0, Assembler::e8);
+++ // if shift > BitsPerByte - 1, clear the element
+++ __ vmsgtu_vi(v0, as_VectorRegister($shift$$reg), BitsPerByte - 1);
+++ __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($src$$reg), Assembler::v0_t);
+++ // otherwise, shift
+++ __ vmnot_m(v0, v0);
+++ __ vsll_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($shift$$reg), Assembler::v0_t);
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vlslS(vReg dst, vReg src, vReg shift) %{
+++ match(Set dst (LShiftVS src shift));
+++ ins_cost(VEC_COST);
+++ effect(TEMP_DEF dst);
+++ format %{ "vmsgtu.vi v0, $shift, 15\t#@vlslS\n\t"
+++ "vxor.vv $dst, $src, $src, Assembler::v0_t\n\t"
+++ "vmnot.m v0, v0\n\t"
+++ "vsll.vv $dst, $src, $shift, Assembler::v0_t" %}
+++ ins_encode %{
+++ __ vsetvli(t0, x0, Assembler::e16);
+++ // if shift > BitsPerShort - 1, clear the element
+++ __ vmsgtu_vi(v0, as_VectorRegister($shift$$reg), BitsPerShort - 1);
+++ __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($src$$reg), Assembler::v0_t);
+++ // otherwise, shift
+++ __ vmnot_m(v0, v0);
+++ __ vsll_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($shift$$reg), Assembler::v0_t);
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vlslI(vReg dst, vReg src, vReg shift) %{
+++ match(Set dst (LShiftVI src shift));
+++ ins_cost(VEC_COST);
+++ format %{ "vsll.vv $dst, $src, $shift\t#@vlslI" %}
+++ ins_encode %{
+++ __ vsetvli(t0, x0, Assembler::e32);
+++ __ vsll_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($shift$$reg));
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vlslL(vReg dst, vReg src, vReg shift) %{
+++ match(Set dst (LShiftVL src shift));
+++ ins_cost(VEC_COST);
+++ format %{ "vsll.vv $dst, $src, $shift\t#@vlslL" %}
+++ ins_encode %{
+++ __ vsetvli(t0, x0, Assembler::e64);
+++ __ vsll_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($shift$$reg));
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vlsrB(vReg dst, vReg src, vReg shift) %{
+++ match(Set dst (URShiftVB src shift));
+++ ins_cost(VEC_COST);
+++ effect(TEMP_DEF dst);
+++ format %{ "vmsgtu.vi v0, $shift, 7\t#@vlsrB\n\t"
+++ "vxor.vv $dst, $src, $src, Assembler::v0_t\n\t"
+++ "vmnot.m v0, v0\n\t"
+++ "vsrl.vv $dst, $src, $shift, Assembler::v0_t" %}
+++ ins_encode %{
+++ __ vsetvli(t0, x0, Assembler::e8);
+++ // if shift > BitsPerByte - 1, clear the element
+++ __ vmsgtu_vi(v0, as_VectorRegister($shift$$reg), BitsPerByte - 1);
+++ __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($src$$reg), Assembler::v0_t);
+++ // otherwise, shift
+++ __ vmnot_m(v0, v0);
+++ __ vsrl_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($shift$$reg), Assembler::v0_t);
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vlsrS(vReg dst, vReg src, vReg shift) %{
+++ match(Set dst (URShiftVS src shift));
+++ ins_cost(VEC_COST);
+++ effect(TEMP_DEF dst);
+++ format %{ "vmsgtu.vi v0, $shift, 15\t#@vlsrS\n\t"
+++ "vxor.vv $dst, $src, $src, Assembler::v0_t\n\t"
+++ "vmnot.m v0, v0\n\t"
+++ "vsrl.vv $dst, $src, $shift, Assembler::v0_t" %}
+++ ins_encode %{
+++ __ vsetvli(t0, x0, Assembler::e16);
+++ // if shift > BitsPerShort - 1, clear the element
+++ __ vmsgtu_vi(v0, as_VectorRegister($shift$$reg), BitsPerShort - 1);
+++ __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($src$$reg), Assembler::v0_t);
+++ // otherwise, shift
+++ __ vmnot_m(v0, v0);
+++ __ vsrl_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($shift$$reg), Assembler::v0_t);
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vlsrI(vReg dst, vReg src, vReg shift) %{
+++ match(Set dst (URShiftVI src shift));
+++ ins_cost(VEC_COST);
+++ format %{ "vsrl.vv $dst, $src, $shift\t#@vlsrI" %}
+++ ins_encode %{
+++ __ vsetvli(t0, x0, Assembler::e32);
+++ __ vsrl_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($shift$$reg));
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vlsrL(vReg dst, vReg src, vReg shift) %{
+++ match(Set dst (URShiftVL src shift));
+++ ins_cost(VEC_COST);
+++ format %{ "vsrl.vv $dst, $src, $shift\t#@vlsrL" %}
+++ ins_encode %{
+++ __ vsetvli(t0, x0, Assembler::e64);
+++ __ vsrl_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($shift$$reg));
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vasrB_imm(vReg dst, vReg src, immI shift) %{
+++ match(Set dst (RShiftVB src (RShiftCntV shift)));
+++ ins_cost(VEC_COST);
+++ format %{ "vsra.vi $dst, $src, $shift\t#@vasrB_imm" %}
+++ ins_encode %{
+++ uint32_t con = (unsigned)$shift$$constant & 0x1f;
+++ __ vsetvli(t0, x0, Assembler::e8);
+++ if (con == 0) {
+++ __ vor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($src$$reg));
+++ return;
+++ }
+++ if (con >= BitsPerByte) con = BitsPerByte - 1;
+++ __ vsra_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), con);
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vasrS_imm(vReg dst, vReg src, immI shift) %{
+++ match(Set dst (RShiftVS src (RShiftCntV shift)));
+++ ins_cost(VEC_COST);
+++ format %{ "vsra.vi $dst, $src, $shift\t#@vasrS_imm" %}
+++ ins_encode %{
+++ uint32_t con = (unsigned)$shift$$constant & 0x1f;
+++ __ vsetvli(t0, x0, Assembler::e16);
+++ if (con == 0) {
+++ __ vor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($src$$reg));
+++ return;
+++ }
+++ if (con >= BitsPerShort) con = BitsPerShort - 1;
+++ __ vsra_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), con);
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vasrI_imm(vReg dst, vReg src, immI shift) %{
+++ match(Set dst (RShiftVI src (RShiftCntV shift)));
+++ ins_cost(VEC_COST);
+++ format %{ "vsra.vi $dst, $src, $shift\t#@vasrI_imm" %}
+++ ins_encode %{
+++ uint32_t con = (unsigned)$shift$$constant & 0x1f;
+++ __ vsetvli(t0, x0, Assembler::e32);
+++ if (con == 0) {
+++ __ vor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($src$$reg));
+++ return;
+++ }
+++ __ vsra_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), con);
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vasrL_imm(vReg dst, vReg src, immI shift) %{
+++ predicate((n->in(2)->in(1)->get_int() & 0x3f) < 32);
+++ match(Set dst (RShiftVL src (RShiftCntV shift)));
+++ ins_cost(VEC_COST);
+++ format %{ "vsra.vi $dst, $src, $shift\t#@vasrL_imm" %}
+++ ins_encode %{
+++ uint32_t con = (unsigned)$shift$$constant & 0x1f;
+++ __ vsetvli(t0, x0, Assembler::e64);
+++ if (con == 0) {
+++ __ vor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($src$$reg));
+++ return;
+++ }
+++ __ vsra_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), con);
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vlsrB_imm(vReg dst, vReg src, immI shift) %{
+++ match(Set dst (URShiftVB src (RShiftCntV shift)));
+++ ins_cost(VEC_COST);
+++ format %{ "vsrl.vi $dst, $src, $shift\t#@vlsrB_imm" %}
+++ ins_encode %{
+++ uint32_t con = (unsigned)$shift$$constant & 0x1f;
+++ __ vsetvli(t0, x0, Assembler::e8);
+++ if (con == 0) {
+++ __ vor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($src$$reg));
+++ return;
+++ }
+++ if (con >= BitsPerByte) {
+++ __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($src$$reg));
+++ return;
+++ }
+++ __ vsrl_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), con);
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vlsrS_imm(vReg dst, vReg src, immI shift) %{
+++ match(Set dst (URShiftVS src (RShiftCntV shift)));
+++ ins_cost(VEC_COST);
+++ format %{ "vsrl.vi $dst, $src, $shift\t#@vlsrS_imm" %}
+++ ins_encode %{
+++ uint32_t con = (unsigned)$shift$$constant & 0x1f;
+++ __ vsetvli(t0, x0, Assembler::e16);
+++ if (con == 0) {
+++ __ vor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($src$$reg));
+++ return;
+++ }
+++ if (con >= BitsPerShort) {
+++ __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($src$$reg));
+++ return;
+++ }
+++ __ vsrl_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), con);
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vlsrI_imm(vReg dst, vReg src, immI shift) %{
+++ match(Set dst (URShiftVI src (RShiftCntV shift)));
+++ ins_cost(VEC_COST);
+++ format %{ "vsrl.vi $dst, $src, $shift\t#@vlsrI_imm" %}
+++ ins_encode %{
+++ uint32_t con = (unsigned)$shift$$constant & 0x1f;
+++ __ vsetvli(t0, x0, Assembler::e32);
+++ if (con == 0) {
+++ __ vor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($src$$reg));
+++ return;
+++ }
+++ __ vsrl_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), con);
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vlsrL_imm(vReg dst, vReg src, immI shift) %{
+++ predicate((n->in(2)->in(1)->get_int() & 0x3f) < 32);
+++ match(Set dst (URShiftVL src (RShiftCntV shift)));
+++ ins_cost(VEC_COST);
+++ format %{ "vsrl.vi $dst, $src, $shift\t#@vlsrL_imm" %}
+++ ins_encode %{
+++ uint32_t con = (unsigned)$shift$$constant & 0x1f;
+++ __ vsetvli(t0, x0, Assembler::e64);
+++ if (con == 0) {
+++ __ vor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+++ as_VectorRegister($src$$reg));
+++ return;
+++ }
+++ __ vsrl_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), con);
+++ %}
+++ ins_pipe(pipe_slow);
+++%}
+++
+++instruct vlslB_imm(vReg dst, vReg src, immI shift) %{
+++ match(Set dst (LShiftVB src (LShiftCntV
shift))); +++ ins_cost(VEC_COST); +++ format %{ "vsll.vi $dst, $src, $shift\t#@vlslB_imm" %} +++ ins_encode %{ +++ uint32_t con = (unsigned)$shift$$constant & 0x1f; +++ __ vsetvli(t0, x0, Assembler::e8); +++ if (con >= BitsPerByte) { +++ __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), +++ as_VectorRegister($src$$reg)); +++ return; +++ } +++ __ vsll_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), con); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vlslS_imm(vReg dst, vReg src, immI shift) %{ +++ match(Set dst (LShiftVS src (LShiftCntV shift))); +++ ins_cost(VEC_COST); +++ format %{ "vsll.vi $dst, $src, $shift\t#@vlslS_imm" %} +++ ins_encode %{ +++ uint32_t con = (unsigned)$shift$$constant & 0x1f; +++ __ vsetvli(t0, x0, Assembler::e16); +++ if (con >= BitsPerShort) { +++ __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), +++ as_VectorRegister($src$$reg)); +++ return; +++ } +++ __ vsll_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), con); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vlslI_imm(vReg dst, vReg src, immI shift) %{ +++ match(Set dst (LShiftVI src (LShiftCntV shift))); +++ ins_cost(VEC_COST); +++ format %{ "vsll.vi $dst, $src, $shift\t#@vlslI_imm" %} +++ ins_encode %{ +++ uint32_t con = (unsigned)$shift$$constant & 0x1f; +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vsll_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), con); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vlslL_imm(vReg dst, vReg src, immI shift) %{ +++ predicate((n->in(2)->in(1)->get_int() & 0x3f) < 32); +++ match(Set dst (LShiftVL src (LShiftCntV shift))); +++ ins_cost(VEC_COST); +++ format %{ "vsll.vi $dst, $src, $shift\t#@vlslL_imm" %} +++ ins_encode %{ +++ uint32_t con = (unsigned)$shift$$constant & 0x1f; +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vsll_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), con); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vshiftcntB(vReg dst, iRegIorL2I cnt) %{ +++ predicate(n->bottom_type()->is_vect()->element_basic_type() == T_BYTE); +++ match(Set dst (LShiftCntV cnt)); +++ match(Set dst (RShiftCntV cnt)); +++ format %{ "vmv.v.x $dst, $cnt\t#@vshiftcntB" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e8); +++ __ vmv_v_x(as_VectorRegister($dst$$reg), as_Register($cnt$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vshiftcntS(vReg dst, iRegIorL2I cnt) %{ +++ predicate(n->bottom_type()->is_vect()->element_basic_type() == T_SHORT || +++ n->bottom_type()->is_vect()->element_basic_type() == T_CHAR); +++ match(Set dst (LShiftCntV cnt)); +++ match(Set dst (RShiftCntV cnt)); +++ format %{ "vmv.v.x $dst, $cnt\t#@vshiftcntS" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e16); +++ __ vmv_v_x(as_VectorRegister($dst$$reg), as_Register($cnt$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vshiftcntI(vReg dst, iRegIorL2I cnt) %{ +++ predicate(n->bottom_type()->is_vect()->element_basic_type() == T_INT); +++ match(Set dst (LShiftCntV cnt)); +++ match(Set dst (RShiftCntV cnt)); +++ format %{ "vmv.v.x $dst, $cnt\t#@vshiftcntI" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vmv_v_x(as_VectorRegister($dst$$reg), as_Register($cnt$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vshiftcntL(vReg dst, iRegIorL2I cnt) %{ +++ predicate(n->bottom_type()->is_vect()->element_basic_type() == T_LONG); +++ match(Set dst (LShiftCntV cnt)); +++ match(Set dst (RShiftCntV cnt)); +++ format %{ "vmv.v.x 
$dst, $cnt\t#@vshiftcntL" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vmv_v_x(as_VectorRegister($dst$$reg), as_Register($cnt$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector sqrt +++ +++instruct vsqrtF(vReg dst, vReg src) %{ +++ match(Set dst (SqrtVF src)); +++ ins_cost(VEC_COST); +++ format %{ "vfsqrt.v $dst, $src\t#@vsqrtF" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vfsqrt_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vsqrtD(vReg dst, vReg src) %{ +++ match(Set dst (SqrtVD src)); +++ ins_cost(VEC_COST); +++ format %{ "vfsqrt.v $dst, $src\t#@vsqrtD" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vfsqrt_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++// vector sub +++ +++instruct vsubB(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (SubVB src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vsub.vv $dst, $src1, $src2\t#@vsubB" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e8); +++ __ vsub_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vsubS(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (SubVS src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vsub.vv $dst, $src1, $src2\t#@vsubS" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e16); +++ __ vsub_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vsubI(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (SubVI src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vsub.vv $dst, $src1, $src2\t#@vsubI" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vsub_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vsubL(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (SubVL src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vsub.vv $dst, $src1, $src2\t#@vsubL" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vsub_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vsubF(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (SubVF src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vfsub.vv $dst, $src1, $src2\t@vsubF" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e32); +++ __ vfsub_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vsubD(vReg dst, vReg src1, vReg src2) %{ +++ match(Set dst (SubVD src1 src2)); +++ ins_cost(VEC_COST); +++ format %{ "vfsub.vv $dst, $src1, $src2\t#@vsubD" %} +++ ins_encode %{ +++ __ vsetvli(t0, x0, Assembler::e64); +++ __ vfsub_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), +++ as_VectorRegister($src2$$reg)); +++ %} +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vstring_equalsL(iRegP_R11 str1, iRegP_R13 str2, iRegI_R14 cnt, +++ iRegI_R10 result, vReg_V1 v1, +++ vReg_V2 v2, vReg_V3 v3, rFlagsReg cr) +++%{ +++ predicate(UseRVV && ((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL); +++ match(Set result (StrEquals (Binary str1 str2) cnt)); +++ effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, TEMP v1, TEMP 
v2, TEMP v3, KILL cr); +++ +++ format %{ "String Equals $str1, $str2, $cnt -> $result\t#@string_equalsL" %} +++ ins_encode %{ +++ // Count is in 8-bit bytes; non-Compact chars are 16 bits. +++ __ string_equals_v($str1$$Register, $str2$$Register, +++ $result$$Register, $cnt$$Register, 1); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct vstring_equalsU(iRegP_R11 str1, iRegP_R13 str2, iRegI_R14 cnt, +++ iRegI_R10 result, vReg_V1 v1, +++ vReg_V2 v2, vReg_V3 v3, rFlagsReg cr) +++%{ +++ predicate(UseRVV && ((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU); +++ match(Set result (StrEquals (Binary str1 str2) cnt)); +++ effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, TEMP v1, TEMP v2, TEMP v3, KILL cr); +++ +++ format %{ "String Equals $str1, $str2, $cnt -> $result\t#@string_equalsU" %} +++ ins_encode %{ +++ // Count is in 8-bit bytes; non-Compact chars are 16 bits. +++ __ string_equals_v($str1$$Register, $str2$$Register, +++ $result$$Register, $cnt$$Register, 2); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct varray_equalsB(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result, +++ vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, iRegP_R28 tmp, rFlagsReg cr) +++%{ +++ predicate(UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL); +++ match(Set result (AryEq ary1 ary2)); +++ effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP v1, TEMP v2, TEMP v3, KILL cr); +++ +++ format %{ "Array Equals $ary1, ary2 -> $result\t#@array_equalsB // KILL $tmp" %} +++ ins_encode %{ +++ __ arrays_equals_v($ary1$$Register, $ary2$$Register, +++ $result$$Register, $tmp$$Register, 1); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct varray_equalsC(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result, +++ vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, iRegP_R28 tmp, rFlagsReg cr) +++%{ +++ predicate(UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU); +++ match(Set result (AryEq ary1 ary2)); +++ effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP v1, TEMP v2, TEMP v3, KILL cr); +++ +++ format %{ "Array Equals $ary1, ary2 -> $result\t#@array_equalsC // KILL $tmp" %} +++ ins_encode %{ +++ __ arrays_equals_v($ary1$$Register, $ary2$$Register, +++ $result$$Register, $tmp$$Register, 2); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct vstring_compareU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, +++ iRegI_R10 result, vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vReg_V4 v4, vReg_V5 v5, +++ iRegP_R28 tmp1, iRegL_R29 tmp2) +++%{ +++ predicate(UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UU); +++ match(Set result(StrComp(Binary str1 cnt1)(Binary str2 cnt2))); +++ effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, +++ TEMP v1, TEMP v2, TEMP v3, TEMP v4, TEMP v5); +++ +++ format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareU" %} +++ ins_encode %{ +++ // Count is in 8-bit bytes; non-Compact chars are 16 bits. 
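+++ // Editor's gloss (not upstream text): the UU encoding means both
+++ // operands are UTF-16 strings; the LL, UL and LU variants below
+++ // cover the Latin-1 and mixed-encoding cases.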
+++ __ string_compare_v($str1$$Register, $str2$$Register, +++ $cnt1$$Register, $cnt2$$Register, $result$$Register, +++ $tmp1$$Register, $tmp2$$Register, +++ StrIntrinsicNode::UU); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++instruct vstring_compareL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, +++ iRegI_R10 result, vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vReg_V4 v4, vReg_V5 v5, +++ iRegP_R28 tmp1, iRegL_R29 tmp2) +++%{ +++ predicate(UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LL); +++ match(Set result(StrComp(Binary str1 cnt1)(Binary str2 cnt2))); +++ effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, +++ TEMP v1, TEMP v2, TEMP v3, TEMP v4, TEMP v5); +++ +++ format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareL" %} +++ ins_encode %{ +++ __ string_compare_v($str1$$Register, $str2$$Register, +++ $cnt1$$Register, $cnt2$$Register, $result$$Register, +++ $tmp1$$Register, $tmp2$$Register, +++ StrIntrinsicNode::LL); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct vstring_compareUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, +++ iRegI_R10 result, vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vReg_V4 v4, vReg_V5 v5, +++ iRegP_R28 tmp1, iRegL_R29 tmp2) +++%{ +++ predicate(UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UL); +++ match(Set result(StrComp(Binary str1 cnt1)(Binary str2 cnt2))); +++ effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, +++ TEMP v1, TEMP v2, TEMP v3, TEMP v4, TEMP v5); +++ +++ format %{"String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareUL" %} +++ ins_encode %{ +++ __ string_compare_v($str1$$Register, $str2$$Register, +++ $cnt1$$Register, $cnt2$$Register, $result$$Register, +++ $tmp1$$Register, $tmp2$$Register, +++ StrIntrinsicNode::UL); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++instruct vstring_compareLU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, +++ iRegI_R10 result, vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vReg_V4 v4, vReg_V5 v5, +++ iRegP_R28 tmp1, iRegL_R29 tmp2) +++%{ +++ predicate(UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LU); +++ match(Set result(StrComp(Binary str1 cnt1)(Binary str2 cnt2))); +++ effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, +++ TEMP v1, TEMP v2, TEMP v3, TEMP v4, TEMP v5); +++ +++ format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareLU" %} +++ ins_encode %{ +++ __ string_compare_v($str1$$Register, $str2$$Register, +++ $cnt1$$Register, $cnt2$$Register, $result$$Register, +++ $tmp1$$Register, $tmp2$$Register, +++ StrIntrinsicNode::LU); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++// fast byte[] to char[] inflation +++instruct vstring_inflate(Universe dummy, iRegP_R10 src, iRegP_R11 dst, iRegI_R12 len, +++ vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, iRegLNoSp tmp) +++%{ +++ predicate(UseRVV); +++ match(Set dummy (StrInflatedCopy src (Binary dst len))); +++ effect(TEMP v1, TEMP v2, TEMP v3, TEMP tmp, USE_KILL src, USE_KILL dst, USE_KILL len); +++ +++ format %{ "String Inflate $src,$dst" %} +++ ins_encode %{ +++ __ byte_array_inflate_v($src$$Register, $dst$$Register, $len$$Register, $tmp$$Register); +++ %} +++ ins_pipe(pipe_class_memory); +++%} +++ +++// encode char[] to byte[] in ISO_8859_1 +++instruct vencode_iso_array(iRegP_R12 src, iRegP_R11 dst, iRegI_R13 len, iRegI_R10 result, +++ vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, iRegLNoSp tmp) +++%{ +++ 
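+++ // Editor's note (hedged summary, not upstream text): EncodeISOArray
+++ // copies up to $len chars into bytes, stopping at the first char
+++ // above 0xFF; $result receives the number of chars actually encoded.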
predicate(UseRVV); +++ match(Set result (EncodeISOArray src (Binary dst len))); +++ effect(TEMP_DEF result, USE_KILL src, USE_KILL dst, USE_KILL len, +++ TEMP v1, TEMP v2, TEMP v3, TEMP tmp); +++ +++ format %{ "Encode array $src,$dst,$len -> $result" %} +++ ins_encode %{ +++ __ encode_iso_array_v($src$$Register, $dst$$Register, $len$$Register, +++ $result$$Register, $tmp$$Register); +++ %} +++ ins_pipe( pipe_class_memory ); +++%} +++ +++// fast char[] to byte[] compression +++instruct vstring_compress(iRegP_R12 src, iRegP_R11 dst, iRegI_R13 len, iRegI_R10 result, +++ vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, iRegLNoSp tmp) +++%{ +++ predicate(UseRVV); +++ match(Set result (StrCompressedCopy src (Binary dst len))); +++ effect(TEMP_DEF result, USE_KILL src, USE_KILL dst, USE_KILL len, +++ TEMP v1, TEMP v2, TEMP v3, TEMP tmp); +++ +++ format %{ "String Compress $src,$dst -> $result // KILL R11, R12, R13" %} +++ ins_encode %{ +++ __ char_array_compress_v($src$$Register, $dst$$Register, $len$$Register, +++ $result$$Register, $tmp$$Register); +++ %} +++ ins_pipe( pipe_slow ); +++%} +++ +++instruct vhas_negatives(iRegP_R11 ary, iRegI_R12 len, iRegI_R10 result, +++ vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, iRegLNoSp tmp) +++%{ +++ predicate(UseRVV); +++ match(Set result (HasNegatives ary len)); +++ effect(TEMP_DEF result, USE_KILL ary, USE_KILL len, TEMP v1, TEMP v2, TEMP v3, TEMP tmp); +++ +++ format %{ "has negatives byte[] $ary, $len -> $result" %} +++ ins_encode %{ +++ __ has_negatives_v($ary$$Register, $len$$Register, $result$$Register, $tmp$$Register); +++ %} +++ +++ ins_pipe(pipe_slow); +++%} +++ +++instruct vstringU_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch, +++ iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, +++ vReg_V1 v1, vReg_V2 v2, vReg_V3 v3) +++%{ +++ predicate(UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U)); +++ match(Set result (StrIndexOfChar (Binary str1 cnt1) ch)); +++ effect(TEMP_DEF result, USE_KILL str1, USE_KILL cnt1, USE_KILL ch, +++ TEMP tmp1, TEMP tmp2, TEMP v1, TEMP v2, TEMP v3); +++ +++ format %{ "StringUTF16 IndexOf char[] $str1, $cnt1, $ch -> $result" %} +++ +++ ins_encode %{ +++ __ string_indexof_char_v($str1$$Register, $cnt1$$Register, $ch$$Register, +++ $result$$Register, $tmp1$$Register, $tmp2$$Register, +++ false /* isL */); +++ %} +++ +++ ins_pipe(pipe_class_memory); +++%} +++ +++instruct vstringL_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch, +++ iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, +++ vReg_V1 v1, vReg_V2 v2, vReg_V3 v3) +++%{ +++ predicate(UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L)); +++ match(Set result (StrIndexOfChar (Binary str1 cnt1) ch)); +++ effect(TEMP_DEF result, USE_KILL str1, USE_KILL cnt1, USE_KILL ch, +++ TEMP tmp1, TEMP tmp2, TEMP v1, TEMP v2, TEMP v3); +++ +++ format %{ "StringLatin1 IndexOf char[] $str1, $cnt1, $ch -> $result" %} +++ +++ ins_encode %{ +++ __ string_indexof_char_v($str1$$Register, $cnt1$$Register, $ch$$Register, +++ $result$$Register, $tmp1$$Register, $tmp2$$Register, +++ true /* isL */); +++ %} +++ +++ ins_pipe(pipe_class_memory); +++%} +++ +++// clearing of an array +++instruct vclearArray_reg_reg(iRegL_R29 cnt, iRegP_R28 base, Universe dummy, +++ vReg_V1 vReg1, vReg_V2 vReg2, vReg_V3 vReg3) +++%{ +++ predicate(UseRVV); +++ match(Set dummy (ClearArray cnt base)); +++ effect(USE_KILL cnt, USE_KILL base, TEMP vReg1, TEMP vReg2, TEMP vReg3); +++ +++ format %{ "ClearArray $cnt, $base\t#@clearArray_reg_reg" %} +++ +++ ins_encode %{ +++ __ 
clear_array_v($base$$Register, $cnt$$Register); +++ %} +++ +++ ins_pipe(pipe_class_memory); +++%} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp ++@@ -0,0 +1,2966 @@ +++/* +++ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.hpp" +++#include "asm/macroAssembler.inline.hpp" +++#include "code/debugInfoRec.hpp" +++#include "code/icBuffer.hpp" +++#include "code/vtableStubs.hpp" +++#include "compiler/oopMap.hpp" +++#include "gc/shared/barrierSetAssembler.hpp" +++#include "interpreter/interp_masm.hpp" +++#include "interpreter/interpreter.hpp" +++#include "logging/log.hpp" +++#include "memory/resourceArea.hpp" +++#include "nativeInst_riscv.hpp" +++#include "oops/compiledICHolder.hpp" +++#include "oops/klass.inline.hpp" +++#include "prims/methodHandles.hpp" +++#include "runtime/jniHandles.hpp" +++#include "runtime/safepointMechanism.hpp" +++#include "runtime/sharedRuntime.hpp" +++#include "runtime/signature.hpp" +++#include "runtime/stubRoutines.hpp" +++#include "runtime/vframeArray.hpp" +++#include "utilities/align.hpp" +++#include "utilities/formatBuffer.hpp" +++#include "vmreg_riscv.inline.hpp" +++#ifdef COMPILER1 +++#include "c1/c1_Runtime1.hpp" +++#endif +++#ifdef COMPILER2 +++#include "adfiles/ad_riscv.hpp" +++#include "opto/runtime.hpp" +++#endif +++ +++#define __ masm-> +++ +++const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size; +++ +++class SimpleRuntimeFrame { +++public: +++ +++ // Most of the runtime stubs have this simple frame layout. +++ // This class exists to make the layout shared in one place. +++ // Offsets are for compiler stack slots, which are jints. +++ enum layout { +++ // The frame sender code expects that fp will be in the "natural" place and +++ // will override any oopMap setting for it. We must therefore force the layout +++ // so that it agrees with the frame sender code. 
+++ // we don't expect any arg reg save area so riscv asserts that +++ // frame::arg_reg_save_area_bytes == 0 +++ fp_off = 0, fp_off2, +++ return_off, return_off2, +++ framesize +++ }; +++}; +++ +++class RegisterSaver { +++ const bool _save_vectors; +++ public: +++ RegisterSaver(bool save_vectors) : _save_vectors(UseRVV && save_vectors) {} +++ ~RegisterSaver() {} +++ OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words); +++ void restore_live_registers(MacroAssembler* masm); +++ +++ // Offsets into the register save area +++ // Used by deoptimization when it is managing result register +++ // values on its own +++ // gregs:28, float_register:32; except: x1(ra) & x2(sp) & gp(x3) & tp(x4) +++ // |---v0---|<---SP +++ // |---v1---|save vectors only in generate_handler_blob +++ // |-- .. --| +++ // |---v31--|----- +++ // |---f0---| +++ // |---f1---| +++ // | .. | +++ // |---f31--| +++ // |---reserved slot for stack alignment---| +++ // |---x5---| +++ // | x6 | +++ // |---.. --| +++ // |---x31--| +++ // |---fp---| +++ // |---ra---| +++ int v0_offset_in_bytes(void) { return 0; } +++ int f0_offset_in_bytes(void) { +++ int f0_offset = 0; +++#ifdef COMPILER2 +++ if (_save_vectors) { +++ f0_offset += Matcher::scalable_vector_reg_size(T_INT) * VectorRegisterImpl::number_of_registers * +++ BytesPerInt; +++ } +++#endif +++ return f0_offset; +++ } +++ int reserved_slot_offset_in_bytes(void) { +++ return f0_offset_in_bytes() + +++ FloatRegisterImpl::max_slots_per_register * +++ FloatRegisterImpl::number_of_registers * +++ BytesPerInt; +++ } +++ +++ int reg_offset_in_bytes(Register r) { +++ assert (r->encoding() > 4, "ra, sp, gp and tp not saved"); +++ return reserved_slot_offset_in_bytes() + (r->encoding() - 4 /* x1, x2, x3, x4 */) * wordSize; +++ } +++ +++ int freg_offset_in_bytes(FloatRegister f) { +++ return f0_offset_in_bytes() + f->encoding() * wordSize; +++ } +++ +++ int ra_offset_in_bytes(void) { +++ return reserved_slot_offset_in_bytes() + +++ (RegisterImpl::number_of_registers - 3) * +++ RegisterImpl::max_slots_per_register * +++ BytesPerInt; +++ } +++}; +++ +++OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) { +++ int vector_size_in_bytes = 0; +++ int vector_size_in_slots = 0; +++#ifdef COMPILER2 +++ if (_save_vectors) { +++ vector_size_in_bytes += Matcher::scalable_vector_reg_size(T_BYTE); +++ vector_size_in_slots += Matcher::scalable_vector_reg_size(T_INT); +++ } +++#endif +++ +++ int frame_size_in_bytes = align_up(additional_frame_words * wordSize + ra_offset_in_bytes() + wordSize, 16); +++ // OopMap frame size is in compiler stack slots (jint's) not bytes or words +++ int frame_size_in_slots = frame_size_in_bytes / BytesPerInt; +++ // The caller will allocate additional_frame_words +++ int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt; +++ // CodeBlob frame size is in words. +++ int frame_size_in_words = frame_size_in_bytes / wordSize; +++ *total_frame_words = frame_size_in_words; +++ +++ // Save Integer, Float and Vector registers. +++ __ enter(); +++ __ push_CPU_state(_save_vectors, vector_size_in_bytes); +++ +++ // Set an oopmap for the call site. This oopmap will map all +++ // oop-registers and debug-info registers as callee-saved. This +++ // will allow deoptimization at this safepoint to find all possible +++ // debug-info recordings, as well as let GC find all oops. 
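+++ // Editor's worked example (illustrative, assuming RVV is off,
+++ // additional_frame_words == 0, and 32 GPRs/FPRs at 2 slots each):
+++ // f0_offset_in_bytes() = 0, reserved_slot_offset_in_bytes() =
+++ // 2 * 32 * 4 = 256, ra_offset_in_bytes() = 256 + 29 * 2 * 4 = 488,
+++ // so frame_size_in_bytes = align_up(488 + 8, 16) = 496, i.e. 62 words.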
+++
+++ OopMapSet *oop_maps = new OopMapSet();
+++ OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
+++ assert_cond(oop_maps != NULL && oop_map != NULL);
+++
+++ int sp_offset_in_slots = 0;
+++ int step_in_slots = 0;
+++ if (_save_vectors) {
+++ step_in_slots = vector_size_in_slots;
+++ for (int i = 0; i < VectorRegisterImpl::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
+++ VectorRegister r = as_VectorRegister(i);
+++ oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg());
+++ }
+++ }
+++
+++ step_in_slots = FloatRegisterImpl::max_slots_per_register;
+++ for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
+++ FloatRegister r = as_FloatRegister(i);
+++ oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg());
+++ }
+++
+++ step_in_slots = RegisterImpl::max_slots_per_register;
+++ // skip the slot reserved for alignment, see MacroAssembler::push_reg;
+++ // also skip x5 ~ x6 on the stack because they are caller-saved registers.
+++ sp_offset_in_slots += RegisterImpl::max_slots_per_register * 3;
+++ // besides, we ignore x0 ~ x4 because push_CPU_state won't push them on the stack.
+++ for (int i = 7; i < RegisterImpl::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
+++ Register r = as_Register(i);
+++ if (r != xthread) {
+++ oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots + additional_frame_slots), r->as_VMReg());
+++ }
+++ }
+++
+++ return oop_map;
+++}
+++
+++void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
+++#ifdef COMPILER2
+++ __ pop_CPU_state(_save_vectors, Matcher::scalable_vector_reg_size(T_BYTE));
+++#else
+++ __ pop_CPU_state(_save_vectors);
+++#endif
+++ __ leave();
+++}
+++
+++// Is vector's size (in bytes) bigger than a size saved by default?
+++// riscv does not overlay the floating-point registers on vector registers like aarch64 does.
+++bool SharedRuntime::is_wide_vector(int size) {
+++ return UseRVV;
+++}
+++
+++// The java_calling_convention describes stack locations as ideal slots on
+++// a frame with no abi restrictions. Since we must observe abi restrictions
+++// (like the placement of the register window) the slots must be biased by
+++// the following value.
+++static int reg2offset_in(VMReg r) {
+++ // Account for saved fp and ra
+++ // This should really be in_preserve_stack_slots
+++ return r->reg2stack() * VMRegImpl::stack_slot_size;
+++}
+++
+++static int reg2offset_out(VMReg r) {
+++ return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
+++}
+++
+++// ---------------------------------------------------------------------------
+++// Read the array of BasicTypes from a signature, and compute where the
+++// arguments should go. Values in the VMRegPair regs array refer to 4-byte
+++// quantities. Values less than VMRegImpl::stack0 are registers, those above
+++// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
+++// as framesizes are fixed.
+++// VMRegImpl::stack0 refers to the first slot 0(sp),
+++// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
+++// values (up to RegisterImpl::number_of_registers) are the 64-bit
+++// integer registers.
+++
+++// Note: the INPUTS in sig_bt are in units of Java argument words,
+++// which are 64-bit. The OUTPUTS are in 32-bit units.
+++
+++// The Java calling convention is a "shifted" version of the C ABI.
+++// By skipping the first C ABI register we can call non-static jni +++// methods with small numbers of arguments without having to shuffle +++// the arguments at all. Since we control the java ABI we ought to at +++// least get some advantage out of it. +++ +++int SharedRuntime::java_calling_convention(const BasicType *sig_bt, +++ VMRegPair *regs, +++ int total_args_passed) { +++ // Create the mapping between argument positions and +++ // registers. +++ static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = { +++ j_rarg0, j_rarg1, j_rarg2, j_rarg3, +++ j_rarg4, j_rarg5, j_rarg6, j_rarg7 +++ }; +++ static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = { +++ j_farg0, j_farg1, j_farg2, j_farg3, +++ j_farg4, j_farg5, j_farg6, j_farg7 +++ }; +++ +++ uint int_args = 0; +++ uint fp_args = 0; +++ uint stk_args = 0; // inc by 2 each time +++ +++ for (int i = 0; i < total_args_passed; i++) { +++ switch (sig_bt[i]) { +++ case T_BOOLEAN: // fall through +++ case T_CHAR: // fall through +++ case T_BYTE: // fall through +++ case T_SHORT: // fall through +++ case T_INT: +++ if (int_args < Argument::n_int_register_parameters_j) { +++ regs[i].set1(INT_ArgReg[int_args++]->as_VMReg()); +++ } else { +++ regs[i].set1(VMRegImpl::stack2reg(stk_args)); +++ stk_args += 2; +++ } +++ break; +++ case T_VOID: +++ // halves of T_LONG or T_DOUBLE +++ assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half"); +++ regs[i].set_bad(); +++ break; +++ case T_LONG: // fall through +++ assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half"); +++ case T_OBJECT: // fall through +++ case T_ARRAY: // fall through +++ case T_ADDRESS: +++ if (int_args < Argument::n_int_register_parameters_j) { +++ regs[i].set2(INT_ArgReg[int_args++]->as_VMReg()); +++ } else { +++ regs[i].set2(VMRegImpl::stack2reg(stk_args)); +++ stk_args += 2; +++ } +++ break; +++ case T_FLOAT: +++ if (fp_args < Argument::n_float_register_parameters_j) { +++ regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg()); +++ } else { +++ regs[i].set1(VMRegImpl::stack2reg(stk_args)); +++ stk_args += 2; +++ } +++ break; +++ case T_DOUBLE: +++ assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half"); +++ if (fp_args < Argument::n_float_register_parameters_j) { +++ regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg()); +++ } else { +++ regs[i].set2(VMRegImpl::stack2reg(stk_args)); +++ stk_args += 2; +++ } +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ } +++ +++ return align_up(stk_args, 2); +++} +++ +++// Patch the callers callsite with entry to compiled code if it exists. 
+++static void patch_callers_callsite(MacroAssembler *masm) { +++ Label L; +++ __ ld(t0, Address(xmethod, in_bytes(Method::code_offset()))); +++ __ beqz(t0, L); +++ +++ __ enter(); +++ __ push_CPU_state(); +++ +++ // VM needs caller's callsite +++ // VM needs target method +++ // This needs to be a long call since we will relocate this adapter to +++ // the codeBuffer and it may not reach +++ +++#ifndef PRODUCT +++ assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area"); +++#endif +++ +++ __ mv(c_rarg0, xmethod); +++ __ mv(c_rarg1, ra); +++ RuntimeAddress target(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)); +++ __ relocate(target.rspec(), [&] { +++ int32_t offset; +++ __ la_patchable(t0, target, offset); +++ __ jalr(x1, t0, offset); +++ }); +++ +++ __ pop_CPU_state(); +++ // restore sp +++ __ leave(); +++ __ bind(L); +++} +++ +++static void gen_c2i_adapter(MacroAssembler *masm, +++ int total_args_passed, +++ int comp_args_on_stack, +++ const BasicType *sig_bt, +++ const VMRegPair *regs, +++ Label& skip_fixup) { +++ // Before we get into the guts of the C2I adapter, see if we should be here +++ // at all. We've come from compiled code and are attempting to jump to the +++ // interpreter, which means the caller made a static call to get here +++ // (vcalls always get a compiled target if there is one). Check for a +++ // compiled target. If there is one, we need to patch the caller's call. +++ patch_callers_callsite(masm); +++ +++ __ bind(skip_fixup); +++ +++ int words_pushed = 0; +++ +++ // Since all args are passed on the stack, total_args_passed * +++ // Interpreter::stackElementSize is the space we need. +++ +++ int extraspace = total_args_passed * Interpreter::stackElementSize; +++ +++ __ mv(x30, sp); +++ +++ // stack is aligned, keep it that way +++ extraspace = align_up(extraspace, 2 * wordSize); +++ +++ if (extraspace) { +++ __ sub(sp, sp, extraspace); +++ } +++ +++ // Now write the args into the outgoing interpreter space +++ for (int i = 0; i < total_args_passed; i++) { +++ if (sig_bt[i] == T_VOID) { +++ assert(i > 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "missing half"); +++ continue; +++ } +++ +++ // offset to start parameters +++ int st_off = (total_args_passed - i - 1) * Interpreter::stackElementSize; +++ int next_off = st_off - Interpreter::stackElementSize; +++ +++ // Say 4 args: +++ // i st_off +++ // 0 32 T_LONG +++ // 1 24 T_VOID +++ // 2 16 T_OBJECT +++ // 3 8 T_BOOL +++ // - 0 return address +++ // +++ // However to make thing extra confusing. Because we can fit a Java long/double in +++ // a single slot on a 64 bt vm and it would be silly to break them up, the interpreter +++ // leaves one slot empty and only stores to a single slot. In this case the +++ // slot that is occupied is the T_VOID slot. See I said it was confusing. 
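+++ // Editor's gloss on the table above: for the T_LONG at i == 0,
+++ // st_off is 32 and next_off is 24; the value is stored once at
+++ // next_off, and in debug builds st_off is filled with the junk
+++ // pattern written below.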
+++ +++ VMReg r_1 = regs[i].first(); +++ VMReg r_2 = regs[i].second(); +++ if (!r_1->is_valid()) { +++ assert(!r_2->is_valid(), ""); +++ continue; +++ } +++ if (r_1->is_stack()) { +++ // memory to memory use t0 +++ int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size +++ + extraspace +++ + words_pushed * wordSize); +++ if (!r_2->is_valid()) { +++ __ lwu(t0, Address(sp, ld_off)); +++ __ sd(t0, Address(sp, st_off), /*temp register*/esp); +++ } else { +++ __ ld(t0, Address(sp, ld_off), /*temp register*/esp); +++ +++ // Two VMREgs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG +++ // T_DOUBLE and T_LONG use two slots in the interpreter +++ if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) { +++ // ld_off == LSW, ld_off+wordSize == MSW +++ // st_off == MSW, next_off == LSW +++ __ sd(t0, Address(sp, next_off), /*temp register*/esp); +++#ifdef ASSERT +++ // Overwrite the unused slot with known junk +++ __ mv(t0, 0xdeadffffdeadaaaaul); +++ __ sd(t0, Address(sp, st_off), /*temp register*/esp); +++#endif /* ASSERT */ +++ } else { +++ __ sd(t0, Address(sp, st_off), /*temp register*/esp); +++ } +++ } +++ } else if (r_1->is_Register()) { +++ Register r = r_1->as_Register(); +++ if (!r_2->is_valid()) { +++ // must be only an int (or less ) so move only 32bits to slot +++ __ sd(r, Address(sp, st_off)); +++ } else { +++ // Two VMREgs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG +++ // T_DOUBLE and T_LONG use two slots in the interpreter +++ if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) { +++ // long/double in gpr +++#ifdef ASSERT +++ // Overwrite the unused slot with known junk +++ __ mv(t0, 0xdeadffffdeadaaabul); +++ __ sd(t0, Address(sp, st_off), /*temp register*/esp); +++#endif /* ASSERT */ +++ __ sd(r, Address(sp, next_off)); +++ } else { +++ __ sd(r, Address(sp, st_off)); +++ } +++ } +++ } else { +++ assert(r_1->is_FloatRegister(), ""); +++ if (!r_2->is_valid()) { +++ // only a float use just part of the slot +++ __ fsw(r_1->as_FloatRegister(), Address(sp, st_off)); +++ } else { +++#ifdef ASSERT +++ // Overwrite the unused slot with known junk +++ __ mv(t0, 0xdeadffffdeadaaacul); +++ __ sd(t0, Address(sp, st_off), /*temp register*/esp); +++#endif /* ASSERT */ +++ __ fsd(r_1->as_FloatRegister(), Address(sp, next_off)); +++ } +++ } +++ } +++ +++ __ mv(esp, sp); // Interp expects args on caller's expression stack +++ +++ __ ld(t0, Address(xmethod, in_bytes(Method::interpreter_entry_offset()))); +++ __ jr(t0); +++} +++ +++void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, +++ int total_args_passed, +++ int comp_args_on_stack, +++ const BasicType *sig_bt, +++ const VMRegPair *regs) { +++ // Cut-out for having no stack args. +++ int comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord; +++ if (comp_args_on_stack != 0) { +++ __ sub(t0, sp, comp_words_on_stack * wordSize); +++ __ andi(sp, t0, -16); +++ } +++ +++ // Will jump to the compiled code just as if compiled code was doing it. +++ // Pre-load the register-jump target early, to schedule it better. +++ __ ld(t1, Address(xmethod, in_bytes(Method::from_compiled_offset()))); +++ +++ // Now generate the shuffle code. +++ for (int i = 0; i < total_args_passed; i++) { +++ if (sig_bt[i] == T_VOID) { +++ assert(i > 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "missing half"); +++ continue; +++ } +++ +++ // Pick up 0, 1 or 2 words from SP+offset. 
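+++ // Editor's illustration: ld_off below walks the interpreter's
+++ // argument area top-down, e.g. with total_args_passed == 2, arg 0
+++ // is read from esp + 1 * Interpreter::stackElementSize and arg 1
+++ // from esp + 0.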
+++ +++ assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), +++ "scrambled load targets?"); +++ // Load in argument order going down. +++ int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize; +++ // Point to interpreter value (vs. tag) +++ int next_off = ld_off - Interpreter::stackElementSize; +++ +++ VMReg r_1 = regs[i].first(); +++ VMReg r_2 = regs[i].second(); +++ if (!r_1->is_valid()) { +++ assert(!r_2->is_valid(), ""); +++ continue; +++ } +++ if (r_1->is_stack()) { +++ // Convert stack slot to an SP offset (+ wordSize to account for return address ) +++ int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size; +++ if (!r_2->is_valid()) { +++ __ lw(t0, Address(esp, ld_off)); +++ __ sd(t0, Address(sp, st_off), /*temp register*/t2); +++ } else { +++ // +++ // We are using two optoregs. This can be either T_OBJECT, +++ // T_ADDRESS, T_LONG, or T_DOUBLE the interpreter allocates +++ // two slots but only uses one for thr T_LONG or T_DOUBLE case +++ // So we must adjust where to pick up the data to match the +++ // interpreter. +++ // +++ // Interpreter local[n] == MSW, local[n+1] == LSW however locals +++ // are accessed as negative so LSW is at LOW address +++ +++ // ld_off is MSW so get LSW +++ const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ? +++ next_off : ld_off; +++ __ ld(t0, Address(esp, offset)); +++ // st_off is LSW (i.e. reg.first()) +++ __ sd(t0, Address(sp, st_off), /*temp register*/t2); +++ } +++ } else if (r_1->is_Register()) { // Register argument +++ Register r = r_1->as_Register(); +++ if (r_2->is_valid()) { +++ // +++ // We are using two VMRegs. This can be either T_OBJECT, +++ // T_ADDRESS, T_LONG, or T_DOUBLE the interpreter allocates +++ // two slots but only uses one for thr T_LONG or T_DOUBLE case +++ // So we must adjust where to pick up the data to match the +++ // interpreter. +++ +++ const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ? +++ next_off : ld_off; +++ +++ // this can be a misaligned move +++ __ ld(r, Address(esp, offset)); +++ } else { +++ // sign extend and use a full word? +++ __ lw(r, Address(esp, ld_off)); +++ } +++ } else { +++ if (!r_2->is_valid()) { +++ __ flw(r_1->as_FloatRegister(), Address(esp, ld_off)); +++ } else { +++ __ fld(r_1->as_FloatRegister(), Address(esp, next_off)); +++ } +++ } +++ } +++ +++ // 6243940 We might end up in handle_wrong_method if +++ // the callee is deoptimized as we race thru here. If that +++ // happens we don't want to take a safepoint because the +++ // caller frame will look interpreted and arguments are now +++ // "compiled" so it is much better to make this transition +++ // invisible to the stack walking code. Unfortunately if +++ // we try and find the callee by normal means a safepoint +++ // is possible. So we stash the desired callee in the thread +++ // and the vm will find there should this case occur. 
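+++ // Editor's note (hedged): the store below is that stash;
+++ // SharedRuntime::handle_wrong_method() retrieves the Method* from
+++ // JavaThread::callee_target() on the other side.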
+++ +++ __ sd(xmethod, Address(xthread, JavaThread::callee_target_offset())); +++ +++ __ jr(t1); +++} +++ +++// --------------------------------------------------------------- +++AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm, +++ int total_args_passed, +++ int comp_args_on_stack, +++ const BasicType *sig_bt, +++ const VMRegPair *regs, +++ AdapterFingerPrint* fingerprint) { +++ address i2c_entry = __ pc(); +++ gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs); +++ +++ address c2i_unverified_entry = __ pc(); +++ Label skip_fixup; +++ +++ Label ok; +++ +++ const Register holder = t1; +++ const Register receiver = j_rarg0; +++ const Register tmp = t2; // A call-clobbered register not used for arg passing +++ +++ // ------------------------------------------------------------------------- +++ // Generate a C2I adapter. On entry we know xmethod holds the Method* during calls +++ // to the interpreter. The args start out packed in the compiled layout. They +++ // need to be unpacked into the interpreter layout. This will almost always +++ // require some stack space. We grow the current (compiled) stack, then repack +++ // the args. We finally end in a jump to the generic interpreter entry point. +++ // On exit from the interpreter, the interpreter will restore our SP (lest the +++ // compiled code, which relys solely on SP and not FP, get sick). +++ +++ { +++ __ block_comment("c2i_unverified_entry {"); +++ __ load_klass(t0, receiver, tmp); +++ __ ld(tmp, Address(holder, CompiledICHolder::holder_klass_offset())); +++ __ ld(xmethod, Address(holder, CompiledICHolder::holder_metadata_offset())); +++ __ beq(t0, tmp, ok); +++ __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); +++ +++ __ bind(ok); +++ // Method might have been compiled since the call site was patched to +++ // interpreted; if that is the case treat it as a miss so we can get +++ // the call site corrected. 
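+++ // Editor's gloss: a non-zero Method::code_offset() slot means a
+++ // compiled entry now exists, so fall through to the ic-miss stub
+++ // rather than entering the interpreter.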
+++ __ ld(t0, Address(xmethod, in_bytes(Method::code_offset()))); +++ __ beqz(t0, skip_fixup); +++ __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); +++ __ block_comment("} c2i_unverified_entry"); +++ } +++ +++ address c2i_entry = __ pc(); +++ +++ // Class initialization barrier for static methods +++ address c2i_no_clinit_check_entry = NULL; +++ if (VM_Version::supports_fast_class_init_checks()) { +++ Label L_skip_barrier; +++ +++ { // Bypass the barrier for non-static methods +++ __ lwu(t0, Address(xmethod, Method::access_flags_offset())); +++ __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC)); +++ __ beqz(t1, L_skip_barrier); // non-static +++ } +++ +++ __ load_method_holder(t1, xmethod); +++ __ clinit_barrier(t1, t0, &L_skip_barrier); +++ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); +++ +++ __ bind(L_skip_barrier); +++ c2i_no_clinit_check_entry = __ pc(); +++ } +++ +++ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); +++ bs->c2i_entry_barrier(masm); +++ +++ gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup); +++ +++ __ flush(); +++ return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry); +++} +++ +++int SharedRuntime::vector_calling_convention(VMRegPair *regs, +++ uint num_bits, +++ uint total_args_passed) { +++ Unimplemented(); +++ return 0; +++} +++ +++int SharedRuntime::c_calling_convention(const BasicType *sig_bt, +++ VMRegPair *regs, +++ VMRegPair *regs2, +++ int total_args_passed) { +++ assert(regs2 == NULL, "not needed on riscv"); +++ +++ // We return the amount of VMRegImpl stack slots we need to reserve for all +++ // the arguments NOT counting out_preserve_stack_slots. 
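+++ // Editor's illustration (ours): for a native signature of
+++ // (jint, jlong, jfloat, jdouble) the loop below assigns c_rarg0,
+++ // c_rarg1, c_farg0 and c_farg1 respectively, and returns stk_args == 0.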
+++ +++ static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = { +++ c_rarg0, c_rarg1, c_rarg2, c_rarg3, +++ c_rarg4, c_rarg5, c_rarg6, c_rarg7 +++ }; +++ static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = { +++ c_farg0, c_farg1, c_farg2, c_farg3, +++ c_farg4, c_farg5, c_farg6, c_farg7 +++ }; +++ +++ uint int_args = 0; +++ uint fp_args = 0; +++ uint stk_args = 0; // inc by 2 each time +++ +++ for (int i = 0; i < total_args_passed; i++) { +++ switch (sig_bt[i]) { +++ case T_BOOLEAN: // fall through +++ case T_CHAR: // fall through +++ case T_BYTE: // fall through +++ case T_SHORT: // fall through +++ case T_INT: +++ if (int_args < Argument::n_int_register_parameters_c) { +++ regs[i].set1(INT_ArgReg[int_args++]->as_VMReg()); +++ } else { +++ regs[i].set1(VMRegImpl::stack2reg(stk_args)); +++ stk_args += 2; +++ } +++ break; +++ case T_LONG: // fall through +++ assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half"); +++ case T_OBJECT: // fall through +++ case T_ARRAY: // fall through +++ case T_ADDRESS: // fall through +++ case T_METADATA: +++ if (int_args < Argument::n_int_register_parameters_c) { +++ regs[i].set2(INT_ArgReg[int_args++]->as_VMReg()); +++ } else { +++ regs[i].set2(VMRegImpl::stack2reg(stk_args)); +++ stk_args += 2; +++ } +++ break; +++ case T_FLOAT: +++ if (fp_args < Argument::n_float_register_parameters_c) { +++ regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg()); +++ } else if (int_args < Argument::n_int_register_parameters_c) { +++ regs[i].set1(INT_ArgReg[int_args++]->as_VMReg()); +++ } else { +++ regs[i].set1(VMRegImpl::stack2reg(stk_args)); +++ stk_args += 2; +++ } +++ break; +++ case T_DOUBLE: +++ assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half"); +++ if (fp_args < Argument::n_float_register_parameters_c) { +++ regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg()); +++ } else if (int_args < Argument::n_int_register_parameters_c) { +++ regs[i].set2(INT_ArgReg[int_args++]->as_VMReg()); +++ } else { +++ regs[i].set2(VMRegImpl::stack2reg(stk_args)); +++ stk_args += 2; +++ } +++ break; +++ case T_VOID: // Halves of longs and doubles +++ assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half"); +++ regs[i].set_bad(); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ } +++ +++ return stk_args; +++} +++ +++// On 64 bit we will store integer like items to the stack as +++// 64 bits items (riscv64 abi) even though java would only store +++// 32bits for a parameter. On 32bit it will simply be 32 bits +++// So this routine will do 32->32 on 32bit and 32->64 on 64bit +++static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { +++ if (src.first()->is_stack()) { +++ if (dst.first()->is_stack()) { +++ // stack to stack +++ __ ld(t0, Address(fp, reg2offset_in(src.first()))); +++ __ sd(t0, Address(sp, reg2offset_out(dst.first()))); +++ } else { +++ // stack to reg +++ __ lw(dst.first()->as_Register(), Address(fp, reg2offset_in(src.first()))); +++ } +++ } else if (dst.first()->is_stack()) { +++ // reg to stack +++ __ sd(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first()))); +++ } else { +++ if (dst.first() != src.first()) { +++ // 32bits extend sign +++ __ sign_extend(dst.first()->as_Register(), src.first()->as_Register(), 32); +++ } +++ } +++} +++ +++// An oop arg. 
+++// An oop arg. Must pass a handle, not the oop itself.
+++static void object_move(MacroAssembler* masm,
+++ OopMap* map,
+++ int oop_handle_offset,
+++ int framesize_in_slots,
+++ VMRegPair src,
+++ VMRegPair dst,
+++ bool is_receiver,
+++ int* receiver_offset) {
+++ // must pass a handle. First figure out the location we use as a handle
+++ Register rHandle = dst.first()->is_stack() ? t1 : dst.first()->as_Register();
+++
+++ // See if the oop is NULL; if it is, we need no handle
+++
+++ if (src.first()->is_stack()) {
+++
+++ // Oop is already on the stack as an argument
+++ int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
+++ map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
+++ if (is_receiver) {
+++ *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
+++ }
+++
+++ __ ld(t0, Address(fp, reg2offset_in(src.first())));
+++ __ la(rHandle, Address(fp, reg2offset_in(src.first())));
+++ // conditionally move a NULL
+++ Label notZero1;
+++ __ bnez(t0, notZero1);
+++ __ mv(rHandle, zr);
+++ __ bind(notZero1);
+++ } else {
+++
+++ // Oop is in a register; we must store it to the space we reserve
+++ // on the stack for oop_handles and pass a handle if the oop is non-NULL
+++
+++ const Register rOop = src.first()->as_Register();
+++ int oop_slot = -1;
+++ if (rOop == j_rarg0) {
+++ oop_slot = 0;
+++ } else if (rOop == j_rarg1) {
+++ oop_slot = 1;
+++ } else if (rOop == j_rarg2) {
+++ oop_slot = 2;
+++ } else if (rOop == j_rarg3) {
+++ oop_slot = 3;
+++ } else if (rOop == j_rarg4) {
+++ oop_slot = 4;
+++ } else if (rOop == j_rarg5) {
+++ oop_slot = 5;
+++ } else if (rOop == j_rarg6) {
+++ oop_slot = 6;
+++ } else {
+++ assert(rOop == j_rarg7, "wrong register");
+++ oop_slot = 7;
+++ }
+++
+++ oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
+++ int offset = oop_slot * VMRegImpl::stack_slot_size;
+++
+++ map->set_oop(VMRegImpl::stack2reg(oop_slot));
+++ // Store oop in handle area, may be NULL
+++ __ sd(rOop, Address(sp, offset));
+++ if (is_receiver) {
+++ *receiver_offset = offset;
+++ }
+++
+++ // rOop may be the same as rHandle
+++ if (rOop == rHandle) {
+++ Label isZero;
+++ __ beqz(rOop, isZero);
+++ __ la(rHandle, Address(sp, offset));
+++ __ bind(isZero);
+++ } else {
+++ Label notZero2;
+++ __ la(rHandle, Address(sp, offset));
+++ __ bnez(rOop, notZero2);
+++ __ mv(rHandle, zr);
+++ __ bind(notZero2);
+++ }
+++ }
+++
+++ // If the arg is on the stack then place it, otherwise it is already in the correct reg.
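+++ // Either way the invariant established above is, in C-like pseudocode:
+++ //   handle = (oop != NULL) ? address_of_slot_containing_oop : NULL;
+++ // which is exactly the jobject shape that JNIHandles::resolve() expects
+++ // when the callee hands the handle back to the VM.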
+++ if (dst.first()->is_stack()) {
+++ __ sd(rHandle, Address(sp, reg2offset_out(dst.first())));
+++ }
+++}
+++
+++// A float arg may have to do a float-reg to int-reg conversion
+++static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
+++ assert(src.first()->is_stack() && dst.first()->is_stack() ||
+++ src.first()->is_reg() && dst.first()->is_reg() || src.first()->is_stack() && dst.first()->is_reg(), "Unexpected error");
+++ if (src.first()->is_stack()) {
+++ if (dst.first()->is_stack()) {
+++ __ lwu(t0, Address(fp, reg2offset_in(src.first())));
+++ __ sw(t0, Address(sp, reg2offset_out(dst.first())));
+++ } else if (dst.first()->is_Register()) {
+++ __ lwu(dst.first()->as_Register(), Address(fp, reg2offset_in(src.first())));
+++ } else {
+++ ShouldNotReachHere();
+++ }
+++ } else if (src.first() != dst.first()) {
+++ if (src.is_single_phys_reg() && dst.is_single_phys_reg()) {
+++ __ fmv_s(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
+++ } else {
+++ ShouldNotReachHere();
+++ }
+++ }
+++}
+++
+++// A long move
+++static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
+++ if (src.first()->is_stack()) {
+++ if (dst.first()->is_stack()) {
+++ // stack to stack
+++ __ ld(t0, Address(fp, reg2offset_in(src.first())));
+++ __ sd(t0, Address(sp, reg2offset_out(dst.first())));
+++ } else {
+++ // stack to reg
+++ __ ld(dst.first()->as_Register(), Address(fp, reg2offset_in(src.first())));
+++ }
+++ } else if (dst.first()->is_stack()) {
+++ // reg to stack
+++ __ sd(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
+++ } else {
+++ if (dst.first() != src.first()) {
+++ __ mv(dst.first()->as_Register(), src.first()->as_Register());
+++ }
+++ }
+++}
+++
+++// A double move
+++static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
+++ assert(src.first()->is_stack() && dst.first()->is_stack() ||
+++ src.first()->is_reg() && dst.first()->is_reg() || src.first()->is_stack() && dst.first()->is_reg(), "Unexpected error");
+++ if (src.first()->is_stack()) {
+++ if (dst.first()->is_stack()) {
+++ __ ld(t0, Address(fp, reg2offset_in(src.first())));
+++ __ sd(t0, Address(sp, reg2offset_out(dst.first())));
+++ } else if (dst.first()->is_Register()) {
+++ __ ld(dst.first()->as_Register(), Address(fp, reg2offset_in(src.first())));
+++ } else {
+++ ShouldNotReachHere();
+++ }
+++ } else if (src.first() != dst.first()) {
+++ if (src.is_single_phys_reg() && dst.is_single_phys_reg()) {
+++ __ fmv_d(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
+++ } else {
+++ ShouldNotReachHere();
+++ }
+++ }
+++}
+++
+++void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
+++ // We always ignore the frame_slots arg and just use the space just below the
+++ // frame pointer, which by this time is free to use
+++ switch (ret_type) {
+++ case T_FLOAT:
+++ __ fsw(f10, Address(fp, -3 * wordSize));
+++ break;
+++ case T_DOUBLE:
+++ __ fsd(f10, Address(fp, -3 * wordSize));
+++ break;
+++ case T_VOID: break;
+++ default: {
+++ __ sd(x10, Address(fp, -3 * wordSize));
+++ }
+++ }
+++}
+++
+++void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
+++ // We always ignore the frame_slots arg and just use the space just below the
+++ // frame pointer, which by this time is free to use
+++ switch (ret_type) {
+++ case T_FLOAT:
+++ __ flw(f10, Address(fp, -3 * wordSize));
+++ break;
+++ case T_DOUBLE:
+++ __ fld(f10, Address(fp, -3 * wordSize));
+++ break;
+++ case T_VOID: break;
+++ default: {
+++ __ ld(x10, Address(fp, -3 * wordSize));
+++ }
+++ }
+++}
+++
+++static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
+++ RegSet x;
+++ for ( int i = first_arg ; i < arg_count ; i++ ) {
+++ if (args[i].first()->is_Register()) {
+++ x = x + args[i].first()->as_Register();
+++ } else if (args[i].first()->is_FloatRegister()) {
+++ __ addi(sp, sp, -2 * wordSize);
+++ __ fsd(args[i].first()->as_FloatRegister(), Address(sp, 0));
+++ }
+++ }
+++ __ push_reg(x, sp);
+++}
+++
+++static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
+++ RegSet x;
+++ for ( int i = first_arg ; i < arg_count ; i++ ) {
+++ if (args[i].first()->is_Register()) {
+++ x = x + args[i].first()->as_Register();
+++ } else {
+++ ;
+++ }
+++ }
+++ __ pop_reg(x, sp);
+++ for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
+++ if (args[i].first()->is_Register()) {
+++ ;
+++ } else if (args[i].first()->is_FloatRegister()) {
+++ __ fld(args[i].first()->as_FloatRegister(), Address(sp, 0));
+++ __ add(sp, sp, 2 * wordSize);
+++ }
+++ }
+++}
+++
+++// Unpack an array argument into a pointer to the body and the length
+++// if the array is non-null, otherwise pass 0 for both.
+++static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { Unimplemented(); }
+++
+++class ComputeMoveOrder: public StackObj {
+++ class MoveOperation: public ResourceObj {
+++ friend class ComputeMoveOrder;
+++ private:
+++ VMRegPair _src;
+++ VMRegPair _dst;
+++ int _src_index;
+++ int _dst_index;
+++ bool _processed;
+++ MoveOperation* _next;
+++ MoveOperation* _prev;
+++
+++ static int get_id(VMRegPair r) { Unimplemented(); return 0; }
+++
+++ public:
+++ MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
+++ _src(src)
+++ , _dst(dst)
+++ , _src_index(src_index)
+++ , _dst_index(dst_index)
+++ , _processed(false)
+++ , _next(NULL)
+++ , _prev(NULL) { Unimplemented(); }
+++
+++ VMRegPair src() const { Unimplemented(); return _src; }
+++ int src_id() const { Unimplemented(); return 0; }
+++ int src_index() const { Unimplemented(); return 0; }
+++ VMRegPair dst() const { Unimplemented(); return _dst; }
+++ void set_dst(int i, VMRegPair dst) { Unimplemented(); }
+++ int dst_index() const { Unimplemented(); return 0; }
+++ int dst_id() const { Unimplemented(); return 0; }
+++ MoveOperation* next() const { Unimplemented(); return 0; }
+++ MoveOperation* prev() const { Unimplemented(); return 0; }
+++ void set_processed() { Unimplemented(); }
+++ bool is_processed() const { Unimplemented(); return 0; }
+++
+++ // insert
+++ void break_cycle(VMRegPair temp_register) { Unimplemented(); }
+++
+++ void link(GrowableArray<MoveOperation*>& killer) { Unimplemented(); }
+++ };
+++
+++ private:
+++ GrowableArray<MoveOperation*> edges;
+++
+++ public:
+++ ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
+++ BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) { Unimplemented(); }
+++
+++ // Collect all the move operations
+++ void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) { Unimplemented(); }
+++
+++ // Walk the edges breaking cycles between moves. The result list
+++ // can be walked in order to produce the proper set of loads
+++ GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) { Unimplemented(); return 0; }
+++};
+++
+++static void rt_call(MacroAssembler* masm, address dest) {
+++ CodeBlob *cb = CodeCache::find_blob(dest);
+++ RuntimeAddress target(dest);
+++ if (cb) {
+++ __ far_call(target);
+++ } else {
+++ __ relocate(target.rspec(), [&] {
+++ int32_t offset;
+++ __ la_patchable(t0, target, offset);
+++ __ jalr(x1, t0, offset);
+++ });
+++ }
+++}
+++
+++static void verify_oop_args(MacroAssembler* masm,
+++ const methodHandle& method,
+++ const BasicType* sig_bt,
+++ const VMRegPair* regs) {
+++ const Register temp_reg = x9; // not part of any compiled calling seq
+++ if (VerifyOops) {
+++ for (int i = 0; i < method->size_of_parameters(); i++) {
+++ if (sig_bt[i] == T_OBJECT ||
+++ sig_bt[i] == T_ARRAY) {
+++ VMReg r = regs[i].first();
+++ assert(r->is_valid(), "bad oop arg");
+++ if (r->is_stack()) {
+++ __ ld(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
+++ __ verify_oop(temp_reg);
+++ } else {
+++ __ verify_oop(r->as_Register());
+++ }
+++ }
+++ }
+++ }
+++}
+++
+++static void gen_special_dispatch(MacroAssembler* masm,
+++ const methodHandle& method,
+++ const BasicType* sig_bt,
+++ const VMRegPair* regs) {
+++ verify_oop_args(masm, method, sig_bt, regs);
+++ vmIntrinsics::ID iid = method->intrinsic_id();
+++
+++ // Now write the args into the outgoing interpreter space
+++ bool has_receiver = false;
+++ Register receiver_reg = noreg;
+++ int member_arg_pos = -1;
+++ Register member_reg = noreg;
+++ int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
+++ if (ref_kind != 0) {
+++ member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
+++ member_reg = x9; // known to be free at this point
+++ has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
+++ } else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
+++ has_receiver = true;
+++ } else {
+++ fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
+++ }
+++
+++ if (member_reg != noreg) {
+++ // Load the member_arg into register, if necessary.
+++ SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
+++ VMReg r = regs[member_arg_pos].first();
+++ if (r->is_stack()) {
+++ __ ld(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
+++ } else {
+++ // no data motion is needed
+++ member_reg = r->as_Register();
+++ }
+++ }
+++
+++ if (has_receiver) {
+++ // Make sure the receiver is loaded into a register.
+++ assert(method->size_of_parameters() > 0, "oob");
+++ assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
+++ VMReg r = regs[0].first();
+++ assert(r->is_valid(), "bad receiver arg");
+++ if (r->is_stack()) {
+++ // Porting note: This assumes that compiled calling conventions always
+++ // pass the receiver oop in a register. If this is not true on some
+++ // platform, pick a temp and load the receiver from stack.
+++ fatal("receiver always in a register"); +++ receiver_reg = x12; // known to be free at this point +++ __ ld(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size)); +++ } else { +++ // no data motion is needed +++ receiver_reg = r->as_Register(); +++ } +++ } +++ +++ // Figure out which address we are really jumping to: +++ MethodHandles::generate_method_handle_dispatch(masm, iid, +++ receiver_reg, member_reg, /*for_compiler_entry:*/ true); +++} +++ +++// --------------------------------------------------------------------------- +++// Generate a native wrapper for a given method. The method takes arguments +++// in the Java compiled code convention, marshals them to the native +++// convention (handlizes oops, etc), transitions to native, makes the call, +++// returns to java state (possibly blocking), unhandlizes any result and +++// returns. +++// +++// Critical native functions are a shorthand for the use of +++// GetPrimtiveArrayCritical and disallow the use of any other JNI +++// functions. The wrapper is expected to unpack the arguments before +++// passing them to the callee and perform checks before and after the +++// native call to ensure that they GCLocker +++// lock_critical/unlock_critical semantics are followed. Some other +++// parts of JNI setup are skipped like the tear down of the JNI handle +++// block and the check for pending exceptions it's impossible for them +++// to be thrown. +++// +++// They are roughly structured like this: +++// if (GCLocker::needs_gc()) SharedRuntime::block_for_jni_critical() +++// tranistion to thread_in_native +++// unpack arrray arguments and call native entry point +++// check for safepoint in progress +++// check if any thread suspend flags are set +++// call into JVM and possible unlock the JNI critical +++// if a GC was suppressed while in the critical native. +++// transition back to thread_in_Java +++// return to caller +++// +++nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, +++ const methodHandle& method, +++ int compile_id, +++ BasicType* in_sig_bt, +++ VMRegPair* in_regs, +++ BasicType ret_type, +++ address critical_entry) { +++ if (method->is_method_handle_intrinsic()) { +++ vmIntrinsics::ID iid = method->intrinsic_id(); +++ intptr_t start = (intptr_t)__ pc(); +++ int vep_offset = ((intptr_t)__ pc()) - start; +++ +++ // First instruction must be a nop as it may need to be patched on deoptimisation +++ { +++ Assembler::IncompressibleRegion ir(masm); // keep the nop as 4 bytes for patching. 
+++ MacroAssembler::assert_alignment(__ pc());
+++ __ nop(); // 4 bytes
+++ }
+++ gen_special_dispatch(masm,
+++ method,
+++ in_sig_bt,
+++ in_regs);
+++ int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
+++ __ flush();
+++ int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
+++ return nmethod::new_native_nmethod(method,
+++ compile_id,
+++ masm->code(),
+++ vep_offset,
+++ frame_complete,
+++ stack_slots / VMRegImpl::slots_per_word,
+++ in_ByteSize(-1),
+++ in_ByteSize(-1),
+++ (OopMapSet*)NULL);
+++ }
+++ bool is_critical_native = true;
+++ address native_func = critical_entry;
+++ if (native_func == NULL) {
+++ native_func = method->native_function();
+++ is_critical_native = false;
+++ }
+++ assert(native_func != NULL, "must have function");
+++
+++ // An OopMap for lock (and class if static)
+++ OopMapSet *oop_maps = new OopMapSet();
+++ assert_cond(oop_maps != NULL);
+++ intptr_t start = (intptr_t)__ pc();
+++
+++ // We have received a description of where all the Java args are located
+++ // on entry to the wrapper. We need to convert these args to where
+++ // the jni function will expect them. To figure out where they go
+++ // we convert the java signature to a C signature by inserting
+++ // the hidden arguments as arg[0] and possibly arg[1] (static method)
+++
+++ const int total_in_args = method->size_of_parameters();
+++ int total_c_args = total_in_args;
+++ if (!is_critical_native) {
+++ total_c_args += 1;
+++ if (method->is_static()) {
+++ total_c_args++;
+++ }
+++ } else {
+++ for (int i = 0; i < total_in_args; i++) {
+++ if (in_sig_bt[i] == T_ARRAY) {
+++ total_c_args++;
+++ }
+++ }
+++ }
+++
+++ BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
+++ VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+++ BasicType* in_elem_bt = NULL;
+++
+++ int argc = 0;
+++ if (!is_critical_native) {
+++ out_sig_bt[argc++] = T_ADDRESS;
+++ if (method->is_static()) {
+++ out_sig_bt[argc++] = T_OBJECT;
+++ }
+++
+++ for (int i = 0; i < total_in_args ; i++ ) {
+++ out_sig_bt[argc++] = in_sig_bt[i];
+++ }
+++ } else {
+++ in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
+++ SignatureStream ss(method->signature());
+++ for (int i = 0; i < total_in_args ; i++ ) {
+++ if (in_sig_bt[i] == T_ARRAY) {
+++ // Arrays are passed as int, elem* pair
+++ out_sig_bt[argc++] = T_INT;
+++ out_sig_bt[argc++] = T_ADDRESS;
+++ ss.skip_array_prefix(1); // skip one '['
+++ assert(ss.is_primitive(), "primitive type expected");
+++ in_elem_bt[i] = ss.type();
+++ } else {
+++ out_sig_bt[argc++] = in_sig_bt[i];
+++ in_elem_bt[i] = T_VOID;
+++ }
+++ if (in_sig_bt[i] != T_VOID) {
+++ assert(in_sig_bt[i] == ss.type() ||
+++ in_sig_bt[i] == T_ARRAY, "must match");
+++ ss.next();
+++ }
+++ }
+++ }
+++
+++ // Now figure out where the args must be stored and how much stack space
+++ // they require.
+++ int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
+++
+++ // Compute framesize for the wrapper. We need to handlize all oops in
+++ // incoming registers
+++
+++ // Calculate the total number of stack slots we will need.
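+++ // (Illustrative arithmetic for a hypothetical static synchronized native
+++ // with no stack-passed C args: 0 preserve + 0 out slots, + 16 slots of
+++ // oop-handle area (8 arg registers * slots_per_word of 2), + 2 slots for
+++ // the klass handle + 2 for the lock + 6 for the return value / RA / FP
+++ // area = 26 slots, aligned up below to 28 slots, i.e. a 112-byte body.)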
+++
+++ // First count the abi requirement plus all of the outgoing args
+++ int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
+++
+++ // Now the space for the inbound oop handle area
+++ int total_save_slots = 8 * VMRegImpl::slots_per_word; // 8 arguments passed in registers
+++ if (is_critical_native) {
+++ // Critical natives may have to call out so they need a save area
+++ // for register arguments.
+++ int double_slots = 0;
+++ int single_slots = 0;
+++ for ( int i = 0; i < total_in_args; i++) {
+++ if (in_regs[i].first()->is_Register()) {
+++ const Register reg = in_regs[i].first()->as_Register();
+++ switch (in_sig_bt[i]) {
+++ case T_BOOLEAN:
+++ case T_BYTE:
+++ case T_SHORT:
+++ case T_CHAR:
+++ case T_INT: single_slots++; break;
+++ case T_ARRAY: // specific to LP64 (7145024)
+++ case T_LONG: double_slots++; break;
+++ default: ShouldNotReachHere();
+++ }
+++ } else if (in_regs[i].first()->is_FloatRegister()) {
+++ ShouldNotReachHere();
+++ }
+++ }
+++ total_save_slots = double_slots * 2 + single_slots;
+++ // align the save area
+++ if (double_slots != 0) {
+++ stack_slots = align_up(stack_slots, 2);
+++ }
+++ }
+++
+++ int oop_handle_offset = stack_slots;
+++ stack_slots += total_save_slots;
+++
+++ // Now any space we need for handlizing a klass if static method
+++
+++ int klass_slot_offset = 0;
+++ int klass_offset = -1;
+++ int lock_slot_offset = 0;
+++ bool is_static = false;
+++
+++ if (method->is_static()) {
+++ klass_slot_offset = stack_slots;
+++ stack_slots += VMRegImpl::slots_per_word;
+++ klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
+++ is_static = true;
+++ }
+++
+++ // Plus a lock if needed
+++
+++ if (method->is_synchronized()) {
+++ lock_slot_offset = stack_slots;
+++ stack_slots += VMRegImpl::slots_per_word;
+++ }
+++
+++ // Now a place (+2) to save return values or temp during shuffling
+++ // + 4 for return address (which we own) and saved fp
+++ stack_slots += 6;
+++
+++ // OK, the space we have allocated will look like:
+++ //
+++ //
+++ // FP-> | |
+++ // | 2 slots (ra) |
+++ // | 2 slots (fp) |
+++ // |---------------------|
+++ // | 2 slots for moves |
+++ // |---------------------|
+++ // | lock box (if sync) |
+++ // |---------------------| <- lock_slot_offset
+++ // | klass (if static) |
+++ // |---------------------| <- klass_slot_offset
+++ // | oopHandle area |
+++ // |---------------------| <- oop_handle_offset (8 java arg registers)
+++ // | outbound memory |
+++ // | based arguments |
+++ // | |
+++ // |---------------------|
+++ // | |
+++ // SP-> | out_preserved_slots |
+++ //
+++ //
+++
+++
+++ // Now compute actual number of stack words we need rounding to make
+++ // stack properly aligned.
+++ stack_slots = align_up(stack_slots, StackAlignmentInSlots);
+++
+++ int stack_size = stack_slots * VMRegImpl::stack_slot_size;
+++
+++ // First thing: make an IC check to see if we should even be here
+++
+++ // We are free to use all registers as temps without saving them and
+++ // restoring them except fp. fp is the only callee save register
+++ // as far as the interpreter and the compiler(s) are concerned.
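+++ // The inline-cache check below boils down to (sketch):
+++ //   if (receiver->klass() != cached_klass_in_t1) goto ic_miss_stub;
+++ // the ic_miss stub then re-resolves the call site, so falling through to
+++ // `hit` guarantees the receiver matches what the caller compiled against.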
+++
+++
+++ const Register ic_reg = t1;
+++ const Register receiver = j_rarg0;
+++
+++ Label hit;
+++ Label exception_pending;
+++
+++ __ verify_oop(receiver);
+++ assert_different_registers(ic_reg, receiver, t0, t2);
+++ __ cmp_klass(receiver, ic_reg, t0, t2 /* call-clobbered t2 as a tmp */, hit);
+++
+++ __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
+++
+++ // Verified entry point must be aligned
+++ __ align(8);
+++
+++ __ bind(hit);
+++
+++ int vep_offset = ((intptr_t)__ pc()) - start;
+++
+++ // If we have to make this method not-entrant we'll overwrite its
+++ // first instruction with a jump.
+++ {
+++ Assembler::IncompressibleRegion ir(masm); // keep the nop as 4 bytes for patching.
+++ MacroAssembler::assert_alignment(__ pc());
+++ __ nop(); // 4 bytes
+++ }
+++
+++ if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+++ Label L_skip_barrier;
+++ __ mov_metadata(t1, method->method_holder()); // InstanceKlass*
+++ __ clinit_barrier(t1, t0, &L_skip_barrier);
+++ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+++
+++ __ bind(L_skip_barrier);
+++ }
+++
+++ // Generate stack overflow check
+++ __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
+++
+++ // Generate a new frame for the wrapper.
+++ __ enter();
+++ // -2 because return address is already present and so is saved fp
+++ __ sub(sp, sp, stack_size - 2 * wordSize);
+++
+++ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+++ assert_cond(bs != NULL);
+++ bs->nmethod_entry_barrier(masm);
+++
+++ // Frame is now completed as far as size and linkage.
+++ int frame_complete = ((intptr_t)__ pc()) - start;
+++
+++ // We use x18 as the oop handle for the receiver/klass
+++ // It is callee save so it survives the call to native
+++
+++ const Register oop_handle_reg = x18;
+++
+++ //
+++ // We immediately shuffle the arguments so that any vm call we have to
+++ // make from here on out (sync slow path, jvmti, etc.) we will have
+++ // captured the oops from our caller and have a valid oopMap for
+++ // them.
+++
+++ // -----------------
+++ // The Grand Shuffle
+++
+++ // The Java calling convention is either equal (linux) or denser (win64) than the
+++ // c calling convention. However, because of the jni_env argument the c calling
+++ // convention always has at least one more (and two for static) arguments than Java.
+++ // Therefore if we move the args from java -> c backwards then we will never have
+++ // a register->register conflict and we don't have to build a dependency graph
+++ // and figure out how to break any cycles.
+++ //
+++
+++ // Record esp-based slot for receiver on stack for non-static methods
+++ int receiver_offset = -1;
+++
+++ // This is a trick. We double the stack slots so we can claim
+++ // the oops in the caller's frame. Since we are sure to have
+++ // more args than the caller, doubling is enough to make
+++ // sure we can capture all the incoming oop args from the
+++ // caller.
+++ //
+++ OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+++ assert_cond(map != NULL);
+++
+++ int float_args = 0;
+++ int int_args = 0;
+++
+++#ifdef ASSERT
+++ bool reg_destroyed[RegisterImpl::number_of_registers];
+++ bool freg_destroyed[FloatRegisterImpl::number_of_registers];
+++ for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
+++ reg_destroyed[r] = false;
+++ }
+++ for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
+++ freg_destroyed[f] = false;
+++ }
+++
+++#endif /* ASSERT */
+++
+++ // This may iterate in two different directions depending on the
+++ // kind of native it is. The reason is that for regular JNI natives
+++ // the incoming and outgoing registers are offset upwards and for
+++ // critical natives they are offset down.
+++ GrowableArray<int> arg_order(2 * total_in_args);
+++ VMRegPair tmp_vmreg;
+++ tmp_vmreg.set2(x9->as_VMReg());
+++
+++ if (!is_critical_native) {
+++ for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
+++ arg_order.push(i);
+++ arg_order.push(c_arg);
+++ }
+++ } else {
+++ // Compute a valid move order, using tmp_vmreg to break any cycles
+++ ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
+++ }
+++
+++ int temploc = -1;
+++ for (int ai = 0; ai < arg_order.length(); ai += 2) {
+++ int i = arg_order.at(ai);
+++ int c_arg = arg_order.at(ai + 1);
+++ __ block_comment(err_msg("mv %d -> %d", i, c_arg));
+++ if (c_arg == -1) {
+++ assert(is_critical_native, "should only be required for critical natives");
+++ // This arg needs to be moved to a temporary
+++ __ mv(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
+++ in_regs[i] = tmp_vmreg;
+++ temploc = i;
+++ continue;
+++ } else if (i == -1) {
+++ assert(is_critical_native, "should only be required for critical natives");
+++ // Read from the temporary location
+++ assert(temploc != -1, "must be valid");
+++ i = temploc;
+++ temploc = -1;
+++ }
+++#ifdef ASSERT
+++ if (in_regs[i].first()->is_Register()) {
+++ assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
+++ } else if (in_regs[i].first()->is_FloatRegister()) {
+++ assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
+++ }
+++ if (out_regs[c_arg].first()->is_Register()) {
+++ reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
+++ } else if (out_regs[c_arg].first()->is_FloatRegister()) {
+++ freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
+++ }
+++#endif /* ASSERT */
+++ switch (in_sig_bt[i]) {
+++ case T_ARRAY:
+++ if (is_critical_native) {
+++ unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
+++ c_arg++;
+++#ifdef ASSERT
+++ if (out_regs[c_arg].first()->is_Register()) {
+++ reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
+++ } else if (out_regs[c_arg].first()->is_FloatRegister()) {
+++ freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
+++ }
+++#endif
+++ int_args++;
+++ break;
+++ }
+++ case T_OBJECT:
+++ assert(!is_critical_native, "no oop arguments");
+++ object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
+++ ((i == 0) && (!is_static)),
+++ &receiver_offset);
+++ int_args++;
+++ break;
+++ case T_VOID:
+++ break;
+++
+++ case T_FLOAT:
+++ float_move(masm, in_regs[i], out_regs[c_arg]);
+++ float_args++;
+++ break;
+++
+++ case T_DOUBLE:
+++ assert(i + 1 < total_in_args &&
+++ in_sig_bt[i + 1] == T_VOID &&
+++ out_sig_bt[c_arg + 1] == T_VOID, "bad arg list");
+++ double_move(masm, in_regs[i], out_regs[c_arg]);
+++ float_args++;
+++ break;
+++
+++ case T_LONG:
+++ long_move(masm, in_regs[i], out_regs[c_arg]);
+++ int_args++;
+++ break;
+++
+++ case T_ADDRESS:
+++ assert(false, "found T_ADDRESS in java args");
+++ break;
+++
+++ default:
+++ move32_64(masm, in_regs[i], out_regs[c_arg]);
+++ int_args++;
+++ }
+++ }
+++
+++ // point c_arg at the first arg that is already loaded in case we
+++ // need to spill before we call out
+++ int c_arg = total_c_args - total_in_args;
+++
+++ // Pre-load a static method's oop into c_rarg1.
+++ if (method->is_static() && !is_critical_native) {
+++
+++ // load oop into a register
+++ __ movoop(c_rarg1,
+++ JNIHandles::make_local(method->method_holder()->java_mirror()),
+++ /*immediate*/true);
+++
+++ // Now handlize the static class mirror; it's known not-null.
+++ __ sd(c_rarg1, Address(sp, klass_offset));
+++ map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
+++
+++ // Now get the handle
+++ __ la(c_rarg1, Address(sp, klass_offset));
+++ // and protect the arg if we must spill
+++ c_arg--;
+++ }
+++
+++ // Change state to native (we save the return address in the thread, since it might not
+++ // be pushed on the stack when we do a stack traversal).
+++ // We use the same pc/oopMap repeatedly when we call out
+++
+++ Label native_return;
+++ __ set_last_Java_frame(sp, noreg, native_return, t0);
+++
+++ Label dtrace_method_entry, dtrace_method_entry_done;
+++ {
+++ ExternalAddress target((address)&DTraceMethodProbes);
+++ __ relocate(target.rspec(), [&] {
+++ int32_t offset;
+++ __ la_patchable(t0, target, offset);
+++ __ lbu(t0, Address(t0, offset));
+++ });
+++ __ bnez(t0, dtrace_method_entry);
+++ __ bind(dtrace_method_entry_done);
+++ }
+++
+++ // RedefineClasses() tracing support for obsolete method entry
+++ if (log_is_enabled(Trace, redefine, class, obsolete)) {
+++ // protect the args we've loaded
+++ save_args(masm, total_c_args, c_arg, out_regs);
+++ __ mov_metadata(c_rarg1, method());
+++ __ call_VM_leaf(
+++ CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
+++ xthread, c_rarg1);
+++ restore_args(masm, total_c_args, c_arg, out_regs);
+++ }
+++
+++ // Lock a synchronized method
+++
+++ // Register definitions used by locking and unlocking
+++
+++ const Register swap_reg = x10;
+++ const Register obj_reg = x9; // Will contain the oop
+++ const Register lock_reg = x30; // Address of compiler lock object (BasicLock)
+++ const Register old_hdr = x30; // value of old header at unlock time
+++ const Register tmp = ra;
+++
+++ Label slow_path_lock;
+++ Label lock_done;
+++
+++ if (method->is_synchronized()) {
+++
+++ const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
+++
+++ // Get the handle (the 2nd argument)
+++ __ mv(oop_handle_reg, c_rarg1);
+++
+++ // Get address of the box
+++ __ la(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
+++
+++ // Load the oop from the handle
+++ __ ld(obj_reg, Address(oop_handle_reg, 0));
+++
+++ if (UseBiasedLocking) {
+++ __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
+++ }
+++
+++ // Load (object->mark() | 1) into swap_reg % x10
+++ __ ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+++ __ ori(swap_reg, t0, 1);
+++
+++ // Save (object->mark() | 1) into BasicLock's displaced header
+++ __ sd(swap_reg, Address(lock_reg, mark_word_offset));
+++
+++ // src -> dest if dest == x10 else x10 <- dest
+++ {
+++ Label here;
+++ __ cmpxchg_obj_header(x10, lock_reg, obj_reg, t0, lock_done, /*fallthrough*/NULL);
+++ }
+++
+++ // Test if the oopMark is an obvious stack pointer, i.e.,
+++ // 1) (mark & 3) == 0, and
+++ // 2) sp <= mark < mark + os::pagesize()
+++ // These 3 tests can be done by evaluating the following
+++ // expression: ((mark - sp) & (3 - os::vm_page_size())),
+++ // assuming both stack pointer and pagesize have their
+++ // least significant 2 bits clear.
+++ // NOTE: the oopMark is in swap_reg % x10 as the result of cmpxchg
+++
+++ __ sub(swap_reg, swap_reg, sp);
+++ __ andi(swap_reg, swap_reg, 3 - os::vm_page_size());
+++
+++ // Save the test result, for recursive case, the result is zero
+++ __ sd(swap_reg, Address(lock_reg, mark_word_offset));
+++ __ bnez(swap_reg, slow_path_lock);
+++
+++ // Slow path will re-enter here
+++ __ bind(lock_done);
+++ }
+++
+++
+++ // Finally just about ready to make the JNI call
+++
+++ // get JNIEnv* which is first argument to native
+++ if (!is_critical_native) {
+++ __ la(c_rarg0, Address(xthread, in_bytes(JavaThread::jni_environment_offset())));
+++
+++ // Now set thread in native
+++ __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
+++ __ mv(t0, _thread_in_native);
+++ __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
+++ __ sw(t0, Address(t1));
+++ }
+++
+++ rt_call(masm, native_func);
+++
+++ __ bind(native_return);
+++
+++ intptr_t return_pc = (intptr_t) __ pc();
+++ oop_maps->add_gc_map(return_pc - start, map);
+++
+++ // Unpack native results.
+++ if (ret_type != T_OBJECT && ret_type != T_ARRAY) {
+++ __ cast_primitive_type(ret_type, x10);
+++ }
+++
+++ Label safepoint_in_progress, safepoint_in_progress_done;
+++ Label after_transition;
+++
+++ // If this is a critical native, check for a safepoint or suspend request after the call.
+++ // If a safepoint is needed, transition to native, then to native_trans to handle
+++ // safepoints like the native methods that are not critical natives.
+++ if (is_critical_native) {
+++ Label needs_safepoint;
+++ __ safepoint_poll(needs_safepoint, false /* as_return */, true /* acquire */, false /* in_nmethod */);
+++ __ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset()));
+++ __ bnez(t0, needs_safepoint);
+++ __ j(after_transition);
+++ __ bind(needs_safepoint);
+++ }
+++
+++ // Switch thread to "native transition" state before reading the synchronization state.
+++ // This additional state is necessary because reading and testing the synchronization
+++ // state is not atomic w.r.t. GC, as this scenario demonstrates:
+++ // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
+++ // VM thread changes sync state to synchronizing and suspends threads for GC.
+++ // Thread A is resumed to finish this native method, but doesn't block here since it
+++ // didn't see any synchronization in progress, and escapes.
+++ __ mv(t0, _thread_in_native_trans);
+++
+++ __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));
+++
+++ // Force this write out before the read below
+++ __ membar(MacroAssembler::AnyAny);
+++
+++ // check for safepoint operation in progress and/or pending suspend requests
+++ {
+++ // We need an acquire here to ensure that any subsequent load of the
+++ // global SafepointSynchronize::_state flag is ordered after this load
+++ // of the thread-local polling word.
+++ // We don't want this poll to return false (i.e. not safepointing) and a
+++ // later poll of the global SafepointSynchronize::_state spuriously to
+++ // return true.
+++ // This is to avoid a race when we're in a native->Java transition
+++ // racing the code which wakes up from a safepoint.
+++
+++ __ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
+++ __ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset()));
+++ __ bnez(t0, safepoint_in_progress);
+++ __ bind(safepoint_in_progress_done);
+++ }
+++
+++ // change thread state
+++ __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
+++ __ mv(t0, _thread_in_Java);
+++ __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
+++ __ sw(t0, Address(t1));
+++ __ bind(after_transition);
+++
+++ Label reguard;
+++ Label reguard_done;
+++ __ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
+++ __ mv(t1, StackOverflow::stack_guard_yellow_reserved_disabled);
+++ __ beq(t0, t1, reguard);
+++ __ bind(reguard_done);
+++
+++ // native result if any is live
+++
+++ // Unlock
+++ Label unlock_done;
+++ Label slow_path_unlock;
+++ if (method->is_synchronized()) {
+++
+++ // Get locked oop from the handle we passed to jni
+++ __ ld(obj_reg, Address(oop_handle_reg, 0));
+++
+++ Label done;
+++
+++ if (UseBiasedLocking) {
+++ __ biased_locking_exit(obj_reg, old_hdr, done);
+++ }
+++
+++ // Simple recursive lock?
+++ __ ld(t0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
+++ __ beqz(t0, done);
+++
+++ // Must save x10 if it is live now because cmpxchg must use it
+++ if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
+++ save_native_result(masm, ret_type, stack_slots);
+++ }
+++
+++ // get address of the stack lock
+++ __ la(x10, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
+++ // get old displaced header
+++ __ ld(old_hdr, Address(x10, 0));
+++
+++ // Atomic swap old header if oop still contains the stack lock
+++ Label succeed;
+++ __ cmpxchg_obj_header(x10, old_hdr, obj_reg, t0, succeed, &slow_path_unlock);
+++ __ bind(succeed);
+++
+++ // slow path re-enters here
+++ __ bind(unlock_done);
+++ if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
+++ restore_native_result(masm, ret_type, stack_slots);
+++ }
+++
+++ __ bind(done);
+++ }
+++
+++ Label dtrace_method_exit, dtrace_method_exit_done;
+++ {
+++ ExternalAddress target((address)&DTraceMethodProbes);
+++ __ relocate(target.rspec(), [&] {
+++ int32_t offset;
+++ __ la_patchable(t0, target, offset);
+++ __ lbu(t0, Address(t0, offset));
+++ });
+++ __ bnez(t0, dtrace_method_exit);
+++ __ bind(dtrace_method_exit_done);
+++ }
+++
+++ __ reset_last_Java_frame(false);
+++
+++ // Unbox oop result, e.g. JNIHandles::resolve result.
+++ if (is_reference_type(ret_type)) {
+++ __ resolve_jobject(x10, xthread, t1);
+++ }
+++
+++ if (CheckJNICalls) {
+++ // clear_pending_jni_exception_check
+++ __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset()));
+++ }
+++
+++ if (!is_critical_native) {
+++ // reset handle block
+++ __ ld(x12, Address(xthread, JavaThread::active_handles_offset()));
+++ __ sd(zr, Address(x12, JNIHandleBlock::top_offset_in_bytes()));
+++ }
+++
+++ __ leave();
+++
+++ if (!is_critical_native) {
+++ // Any exception pending?
+++ __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
+++ __ bnez(t0, exception_pending);
+++ }
+++
+++ // We're done
+++ __ ret();
+++
+++ // Unexpected paths are out of line and go here
+++
+++ if (!is_critical_native) {
+++ // forward the exception
+++ __ bind(exception_pending);
+++
+++ // and forward the exception
+++ __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+++ }
+++
+++ // Slow path locking & unlocking
+++ if (method->is_synchronized()) {
+++
+++ __ block_comment("Slow path lock {");
+++ __ bind(slow_path_lock);
+++
+++ // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
+++ // args are (oop obj, BasicLock* lock, JavaThread* thread)
+++
+++ // protect the args we've loaded
+++ save_args(masm, total_c_args, c_arg, out_regs);
+++
+++ __ mv(c_rarg0, obj_reg);
+++ __ mv(c_rarg1, lock_reg);
+++ __ mv(c_rarg2, xthread);
+++
+++ // Not a leaf but we have last_Java_frame setup as we want
+++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
+++ restore_args(masm, total_c_args, c_arg, out_regs);
+++
+++#ifdef ASSERT
+++ { Label L;
+++ __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
+++ __ beqz(t0, L);
+++ __ stop("no pending exception allowed on exit from monitorenter");
+++ __ bind(L);
+++ }
+++#endif
+++ __ j(lock_done);
+++
+++ __ block_comment("} Slow path lock");
+++
+++ __ block_comment("Slow path unlock {");
+++ __ bind(slow_path_unlock);
+++
+++ if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
+++ save_native_result(masm, ret_type, stack_slots);
+++ }
+++
+++ __ mv(c_rarg2, xthread);
+++ __ la(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
+++ __ mv(c_rarg0, obj_reg);
+++
+++ // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
+++ // NOTE that obj_reg == x9 currently
+++ __ ld(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));
+++ __ sd(zr, Address(xthread, in_bytes(Thread::pending_exception_offset())));
+++
+++ rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
+++
+++#ifdef ASSERT
+++ {
+++ Label L;
+++ __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
+++ __ beqz(t0, L);
+++ __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
+++ __ bind(L);
+++ }
+++#endif /* ASSERT */
+++
+++ __ sd(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));
+++
+++ if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
+++ restore_native_result(masm, ret_type, stack_slots);
+++ }
+++ __ j(unlock_done);
+++
+++ __ block_comment("} Slow path unlock");
+++
+++ } // synchronized
+++
+++ // SLOW PATH Reguard the stack if needed
+++
+++ __ bind(reguard);
+++ save_native_result(masm, ret_type, stack_slots);
+++ rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
+++ restore_native_result(masm, ret_type, stack_slots);
+++ // and continue
+++ __ j(reguard_done);
+++
+++ // SLOW PATH safepoint
+++ {
+++ __ block_comment("safepoint {");
+++ __ bind(safepoint_in_progress);
+++
+++ // Don't use call_VM as it will see a possible pending exception and forward it
+++ // and never return here preventing us from clearing _last_native_pc down below.
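+++ // In outline, the slow path below: saves the (possibly live) native
+++ // result, calls JavaThread::check_special_condition_for_native_trans(thread),
+++ // which blocks if a safepoint is in progress, then restores the result
+++ // and jumps back to safepoint_in_progress_done.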
+++ //
+++ save_native_result(masm, ret_type, stack_slots);
+++ __ mv(c_rarg0, xthread);
+++#ifndef PRODUCT
+++ assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
+++#endif
+++ RuntimeAddress target(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
+++ __ relocate(target.rspec(), [&] {
+++ int32_t offset;
+++ __ la_patchable(t0, target, offset);
+++ __ jalr(x1, t0, offset);
+++ });
+++
+++ // Restore any method result value
+++ restore_native_result(masm, ret_type, stack_slots);
+++
+++ __ j(safepoint_in_progress_done);
+++ __ block_comment("} safepoint");
+++ }
+++
+++ // SLOW PATH dtrace support
+++ {
+++ __ block_comment("dtrace entry {");
+++ __ bind(dtrace_method_entry);
+++
+++ // We have all of the arguments set up at this point. We must not touch
+++ // any of the register argument registers here (what would happen if we
+++ // were to save/restore them when there are no oops?)
+++
+++ save_args(masm, total_c_args, c_arg, out_regs);
+++ __ mov_metadata(c_rarg1, method());
+++ __ call_VM_leaf(
+++ CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
+++ xthread, c_rarg1);
+++ restore_args(masm, total_c_args, c_arg, out_regs);
+++ __ j(dtrace_method_entry_done);
+++ __ block_comment("} dtrace entry");
+++ }
+++
+++ {
+++ __ block_comment("dtrace exit {");
+++ __ bind(dtrace_method_exit);
+++ save_native_result(masm, ret_type, stack_slots);
+++ __ mov_metadata(c_rarg1, method());
+++ __ call_VM_leaf(
+++ CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
+++ xthread, c_rarg1);
+++ restore_native_result(masm, ret_type, stack_slots);
+++ __ j(dtrace_method_exit_done);
+++ __ block_comment("} dtrace exit");
+++ }
+++
+++ __ flush();
+++
+++ nmethod *nm = nmethod::new_native_nmethod(method,
+++ compile_id,
+++ masm->code(),
+++ vep_offset,
+++ frame_complete,
+++ stack_slots / VMRegImpl::slots_per_word,
+++ (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
+++ in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
+++ oop_maps);
+++ assert(nm != NULL, "create native nmethod fail!");
+++ return nm;
+++}
+++
+++// this function returns the adjusted size (in number of words) of a c2i adapter
+++// activation for use during deoptimization
+++int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
+++ assert(callee_locals >= callee_parameters,
+++ "test and remove; got more parms than locals");
+++ if (callee_locals < callee_parameters) {
+++ return 0; // No adjustment for negative locals
+++ }
+++ int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
+++ // diff is counted in stack words
+++ return align_up(diff, 2);
+++}
+++
+++//------------------------------generate_deopt_blob----------------------------
+++void SharedRuntime::generate_deopt_blob() {
+++ // Allocate space for the code
+++ ResourceMark rm;
+++ // Setup code generation tools
+++ int pad = 0;
+++ CodeBuffer buffer("deopt_blob", 2048 + pad, 1024);
+++ MacroAssembler* masm = new MacroAssembler(&buffer);
+++ int frame_size_in_words = -1;
+++ OopMap* map = NULL;
+++ OopMapSet *oop_maps = new OopMapSet();
+++ assert_cond(masm != NULL && oop_maps != NULL);
+++ RegisterSaver reg_saver(COMPILER2_OR_JVMCI != 0);
+++
+++ // -------------
+++ // This code enters when returning to a de-optimized nmethod. A return
+++ // address has been pushed on the stack, and return values are in
+++ // registers.
+++ // If we are doing a normal deopt then we were called from the patched
+++ // nmethod from the point we returned to the nmethod. So the return
+++ // address on the stack is wrong by NativeCall::instruction_size
+++ // We will adjust the value so it looks like we have the original return
+++ // address on the stack (like when we eagerly deoptimized).
+++ // In the case of an exception pending when deoptimizing, we enter
+++ // with a return address on the stack that points after the call we patched
+++ // into the exception handler. We have the following register state from,
+++ // e.g., the forward exception stub (see stubGenerator_riscv.cpp).
+++ // x10: exception oop
+++ // x9: exception handler
+++ // x13: throwing pc
+++ // So in this case we simply jam x13 into the useless return address and
+++ // the stack looks just like we want.
+++ //
+++ // At this point we need to de-opt. We save the argument return
+++ // registers. We call the first C routine, fetch_unroll_info(). This
+++ // routine captures the return values and returns a structure which
+++ // describes the current frame size and the sizes of all replacement frames.
+++ // The current frame is compiled code and may contain many inlined
+++ // functions, each with their own JVM state. We pop the current frame, then
+++ // push all the new frames. Then we call the C routine unpack_frames() to
+++ // populate these frames. Finally unpack_frames() returns us the new target
+++ // address. Notice that callee-save registers are BLOWN here; they have
+++ // already been captured in the vframeArray at the time the return PC was
+++ // patched.
+++ address start = __ pc();
+++ Label cont;
+++
+++ // Prolog for the non-exception case!
+++
+++ // Save everything in sight.
+++ map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
+++
+++ // Normal deoptimization. Save exec mode for unpack_frames.
+++ __ mv(xcpool, Deoptimization::Unpack_deopt); // callee-saved
+++ __ j(cont);
+++
+++ int reexecute_offset = __ pc() - start;
+++
+++ // Reexecute case
+++ // The return address is the pc that describes which bci to re-execute at
+++
+++ // No need to update map as each call to save_live_registers will produce identical oopmap
+++ (void) reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
+++
+++ __ mv(xcpool, Deoptimization::Unpack_reexecute); // callee-saved
+++ __ j(cont);
+++
+++ int exception_offset = __ pc() - start;
+++
+++ // Prolog for exception case
+++
+++ // all registers are dead at this entry point, except for x10, and
+++ // x13 which contain the exception oop and exception pc
+++ // respectively. Set them in TLS and fall thru to the
+++ // unpack_with_exception_in_tls entry point.
+++
+++ __ sd(x13, Address(xthread, JavaThread::exception_pc_offset()));
+++ __ sd(x10, Address(xthread, JavaThread::exception_oop_offset()));
+++
+++ int exception_in_tls_offset = __ pc() - start;
+++
+++ // new implementation because exception oop is now passed in JavaThread
+++
+++ // Prolog for exception case
+++ // All registers must be preserved because they might be used by LinearScan
+++ // Exception oop and throwing PC are passed in JavaThread
+++ // tos: stack at point of call to method that threw the exception (i.e. only
+++ // args are on the stack, no return address)
+++
+++ // The return address pushed by save_live_registers will be patched
+++ // later with the throwing pc. The correct value is not available
+++ // now because loading it from memory would destroy registers.
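+++ // To summarize the entry points above: Unpack_deopt enters at `start`,
+++ // Unpack_reexecute at `reexecute_offset`, and the exception case at
+++ // `exception_offset` / `exception_in_tls_offset`; all paths converge on
+++ // `cont` with the chosen mode held in the callee-saved xcpool register.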
+++
+++ // NB: The SP at this point must be the SP of the method that is
+++ // being deoptimized. Deoptimization assumes that the frame created
+++ // here by save_live_registers is immediately below the method's SP.
+++ // This is a somewhat fragile mechanism.
+++
+++ // Save everything in sight.
+++ map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
+++
+++ // Now it is safe to overwrite any register
+++
+++ // Deopt during an exception. Save exec mode for unpack_frames.
+++ __ mv(xcpool, Deoptimization::Unpack_exception); // callee-saved
+++
+++ // load throwing pc from JavaThread and patch it as the return address
+++ // of the current frame. Then clear the field in JavaThread
+++
+++ __ ld(x13, Address(xthread, JavaThread::exception_pc_offset()));
+++ __ sd(x13, Address(fp, frame::return_addr_offset * wordSize));
+++ __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
+++
+++#ifdef ASSERT
+++ // verify that there is really an exception oop in JavaThread
+++ __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
+++ __ verify_oop(x10);
+++
+++ // verify that there is no pending exception
+++ Label no_pending_exception;
+++ __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
+++ __ beqz(t0, no_pending_exception);
+++ __ stop("must not have pending exception here");
+++ __ bind(no_pending_exception);
+++#endif
+++
+++ __ bind(cont);
+++
+++ // Call C code. Need thread and this frame, but NOT official VM entry
+++ // crud. We cannot block on this call, no GC can happen.
+++ //
+++ // UnrollBlock* fetch_unroll_info(JavaThread* thread)
+++
+++ // fetch_unroll_info needs to call last_java_frame().
+++
+++ Label retaddr;
+++ __ set_last_Java_frame(sp, noreg, retaddr, t0);
+++#ifdef ASSERT
+++ {
+++ Label L;
+++ __ ld(t0, Address(xthread,
+++ JavaThread::last_Java_fp_offset()));
+++ __ beqz(t0, L);
+++ __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
+++ __ bind(L);
+++ }
+++#endif // ASSERT
+++ __ mv(c_rarg0, xthread);
+++ __ mv(c_rarg1, xcpool);
+++ RuntimeAddress target(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info));
+++ __ relocate(target.rspec(), [&] {
+++ int32_t offset;
+++ __ la_patchable(t0, target, offset);
+++ __ jalr(x1, t0, offset);
+++ });
+++ __ bind(retaddr);
+++
+++ // Need to have an oopmap that tells fetch_unroll_info where to
+++ // find any register it might need.
+++ oop_maps->add_gc_map(__ pc() - start, map);
+++
+++ __ reset_last_Java_frame(false);
+++
+++ // Load UnrollBlock* into x15
+++ __ mv(x15, x10);
+++
+++ __ lwu(xcpool, Address(x15, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
+++ Label noException;
+++ __ mv(t0, Deoptimization::Unpack_exception);
+++ __ bne(xcpool, t0, noException); // Was exception pending?
+++ __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
+++ __ ld(x13, Address(xthread, JavaThread::exception_pc_offset()));
+++ __ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));
+++ __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
+++
+++ __ verify_oop(x10);
+++
+++ // Overwrite the result registers with the exception results.
+++ __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
+++
+++ __ bind(noException);
+++
+++ // Only register save data is on the stack.
+++ // Now restore the result registers. Everything else is either dead
+++ // or captured in the vframeArray.
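+++ // Roadmap for the code below: restore f10/x10, pop the register save area,
+++ // then walk the UnrollBlock in x15 -- deoptimized frame size, frame sizes
+++ // array, frame pcs array, frame count, caller adjustment -- pushing one
+++ // skeletal interpreter frame per array entry before calling unpack_frames().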
+++
+++ // Restore fp result register
+++ __ fld(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
+++ // Restore integer result register
+++ __ ld(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
+++
+++ // Pop all of the register save area off the stack
+++ __ add(sp, sp, frame_size_in_words * wordSize);
+++
+++ // All of the register save area has been popped off the stack. Only the
+++ // return address remains.
+++
+++ // Pop all the frames we must move/replace.
+++ //
+++ // Frame picture (youngest to oldest)
+++ // 1: self-frame (no frame link)
+++ // 2: deopting frame (no frame link)
+++ // 3: caller of deopting frame (could be compiled/interpreted).
+++ //
+++ // Note: by leaving the return address of self-frame on the stack
+++ // and using the size of frame 2 to adjust the stack
+++ // when we are done the return to frame 3 will still be on the stack.
+++
+++ // Pop deoptimized frame
+++ __ lwu(x12, Address(x15, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
+++ __ sub(x12, x12, 2 * wordSize);
+++ __ add(sp, sp, x12);
+++ __ ld(fp, Address(sp, 0));
+++ __ ld(ra, Address(sp, wordSize));
+++ __ addi(sp, sp, 2 * wordSize);
+++ // RA should now be the return address to the caller (3)
+++
+++#ifdef ASSERT
+++ // Compilers generate code that bangs the stack by as much as the
+++ // interpreter would need. So this stack banging should never
+++ // trigger a fault. Verify that it does not on non product builds.
+++ __ lwu(x9, Address(x15, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
+++ __ bang_stack_size(x9, x12);
+++#endif
+++ // Load address of array of frame pcs into x12
+++ __ ld(x12, Address(x15, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
+++
+++ // Load address of array of frame sizes into x14
+++ __ ld(x14, Address(x15, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
+++
+++ // Load counter into x13
+++ __ lwu(x13, Address(x15, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
+++
+++ // Now adjust the caller's stack to make up for the extra locals
+++ // but record the original sp so that we can save it in the skeletal interpreter
+++ // frame and the stack walking of interpreter_sender will get the unextended sp
+++ // value and not the "real" sp value.
+++
+++ const Register sender_sp = x16;
+++
+++ __ mv(sender_sp, sp);
+++ __ lwu(x9, Address(x15,
+++ Deoptimization::UnrollBlock::
+++ caller_adjustment_offset_in_bytes()));
+++ __ sub(sp, sp, x9);
+++
+++ // Push interpreter frames in a loop
+++ __ mv(t0, 0xDEADDEAD); // Make a recognizable pattern
+++ __ mv(t1, t0);
+++ Label loop;
+++ __ bind(loop);
+++ __ ld(x9, Address(x14, 0)); // Load frame size
+++ __ addi(x14, x14, wordSize);
+++ __ sub(x9, x9, 2 * wordSize); // We'll push pc and fp by hand
+++ __ ld(ra, Address(x12, 0)); // Load pc
+++ __ addi(x12, x12, wordSize);
+++ __ enter(); // Save old & set new fp
+++ __ sub(sp, sp, x9); // Prolog
+++ // This value is corrected by layout_activation_impl
+++ __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
+++ __ sd(sender_sp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
+++ __ mv(sender_sp, sp); // Pass sender_sp to next frame
+++ __ addi(x13, x13, -1); // Decrement counter
+++ __ bnez(x13, loop);
+++
+++ // Re-push self-frame
+++ __ ld(ra, Address(x12));
+++ __ enter();
+++
+++ // Allocate a full-sized register save area. We subtract 2 because
+++ // enter() just pushed 2 words
+++ __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
+++
+++ // Restore frame locals after moving the frame
+++ __ fsd(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
+++ __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
+++
+++ // Call C code. Need thread but NOT official VM entry
+++ // crud. We cannot block on this call, no GC can happen. Call should
+++ // restore return values to their stack-slots with the new SP.
+++ //
+++ // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
+++
+++ // Use fp because the frames look interpreted now
+++ // Don't need the precise return PC here, just precise enough to point into this code blob.
+++ address the_pc = __ pc();
+++ __ set_last_Java_frame(sp, fp, the_pc, t0);
+++
+++ __ mv(c_rarg0, xthread);
+++ __ mv(c_rarg1, xcpool); // second arg: exec_mode
+++ target = RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
+++ __ relocate(target.rspec(), [&] {
+++ int32_t offset;
+++ __ la_patchable(t0, target, offset);
+++ __ jalr(x1, t0, offset);
+++ });
+++
+++ // Set an oopmap for the call site
+++ // Use the same PC we used for the last java frame
+++ oop_maps->add_gc_map(the_pc - start,
+++ new OopMap(frame_size_in_words, 0));
+++
+++ // Clear fp AND pc
+++ __ reset_last_Java_frame(true);
+++
+++ // Collect return values
+++ __ fld(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
+++ __ ld(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
+++
+++ // Pop self-frame.
+++ __ leave(); // Epilog
+++
+++ // Jump to interpreter
+++ __ ret();
+++
+++ // Make sure all code is generated
+++ masm->flush();
+++
+++ _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
+++ assert(_deopt_blob != NULL, "create deoptimization blob fail!");
+++ _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
+++}
+++
+++// Number of stack slots between incoming argument block and the start of
+++// a new frame. The PROLOG must add this many slots to the stack. The
+++// EPILOG must remove this many slots.
+++// RISCV needs two words for RA (return address) and FP (frame pointer).
+++uint SharedRuntime::in_preserve_stack_slots() {
+++ return 2 * VMRegImpl::slots_per_word;
+++}
+++
+++uint SharedRuntime::out_preserve_stack_slots() {
+++ return 0;
+++}
+++
+++#ifdef COMPILER2
+++//------------------------------generate_uncommon_trap_blob--------------------
+++void SharedRuntime::generate_uncommon_trap_blob() {
+++ // Allocate space for the code
+++ ResourceMark rm;
+++ // Setup code generation tools
+++ CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
+++ MacroAssembler* masm = new MacroAssembler(&buffer);
+++ assert_cond(masm != NULL);
+++
+++ assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
+++
+++ address start = __ pc();
+++
+++ // Push self-frame. We get here with a return address in RA
+++ // and sp should be 16-byte aligned
+++ // push fp and retaddr by hand
+++ __ addi(sp, sp, -2 * wordSize);
+++ __ sd(ra, Address(sp, wordSize));
+++ __ sd(fp, Address(sp, 0));
+++ // we don't expect an arg reg save area
+++#ifndef PRODUCT
+++ assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
+++#endif
+++ // compiler left unloaded_class_index in j_rarg0; move it to where the
+++ // runtime expects it.
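+++ // The sign extension below produces the jint the runtime call expects:
+++ // unloaded_class_index travels in c_rarg1, the thread in c_rarg0 and
+++ // Unpack_uncommon_trap in c_rarg2 (set further down).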
+++ __ sign_extend(c_rarg1, j_rarg0, 32);
+++
+++ // we need to set the past SP to the stack pointer of the stub frame
+++ // and the pc to the address where this runtime call will return
+++ // (although actually any pc in this code blob will do).
+++ Label retaddr;
+++ __ set_last_Java_frame(sp, noreg, retaddr, t0);
+++
+++ // Call C code. Need thread but NOT official VM entry
+++ // crud. We cannot block on this call, no GC can happen. Call should
+++ // capture callee-saved registers as well as return values.
+++ //
+++ // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index, jint exec_mode)
+++ //
+++ // n.b. 3 gp args, 0 fp args, integral return type
+++
+++ __ mv(c_rarg0, xthread);
+++ __ mv(c_rarg2, Deoptimization::Unpack_uncommon_trap);
+++ RuntimeAddress target(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
+++ __ relocate(target.rspec(), [&] {
+++ int32_t offset;
+++ __ la_patchable(t0, target, offset);
+++ __ jalr(x1, t0, offset);
+++ });
+++ __ bind(retaddr);
+++
+++ // Set an oopmap for the call site
+++ OopMapSet* oop_maps = new OopMapSet();
+++ OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
+++ assert_cond(oop_maps != NULL && map != NULL);
+++
+++ // location of fp is known implicitly by the frame sender code
+++
+++ oop_maps->add_gc_map(__ pc() - start, map);
+++
+++ __ reset_last_Java_frame(false);
+++
+++ // move UnrollBlock* into x14
+++ __ mv(x14, x10);
+++
+++#ifdef ASSERT
+++ { Label L;
+++ __ lwu(t0, Address(x14, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
+++ __ mv(t1, Deoptimization::Unpack_uncommon_trap);
+++ __ beq(t0, t1, L);
+++ __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
+++ __ bind(L);
+++ }
+++#endif
+++
+++ // Pop all the frames we must move/replace.
+++ //
+++ // Frame picture (youngest to oldest)
+++ // 1: self-frame (no frame link)
+++ // 2: deopting frame (no frame link)
+++ // 3: caller of deopting frame (could be compiled/interpreted).
+++
+++ __ add(sp, sp, (SimpleRuntimeFrame::framesize) << LogBytesPerInt); // Epilog!
+++
+++ // Pop deoptimized frame (int)
+++ __ lwu(x12, Address(x14,
+++ Deoptimization::UnrollBlock::
+++ size_of_deoptimized_frame_offset_in_bytes()));
+++ __ sub(x12, x12, 2 * wordSize);
+++ __ add(sp, sp, x12);
+++ __ ld(fp, Address(sp, 0));
+++ __ ld(ra, Address(sp, wordSize));
+++ __ addi(sp, sp, 2 * wordSize);
+++ // RA should now be the return address to the caller (3) frame
+++
+++#ifdef ASSERT
+++ // Compilers generate code that bangs the stack by as much as the
+++ // interpreter would need. So this stack banging should never
+++ // trigger a fault. Verify that it does not on non-product builds.
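+++ // x11: total size in bytes of the frames we are about to push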
+++ __ lwu(x11, Address(x14, +++ Deoptimization::UnrollBlock:: +++ total_frame_sizes_offset_in_bytes())); +++ __ bang_stack_size(x11, x12); +++#endif +++ +++ // Load address of array of frame pcs into x12 (address*) +++ __ ld(x12, Address(x14, +++ Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); +++ +++ // Load address of array of frame sizes into x15 (intptr_t*) +++ __ ld(x15, Address(x14, +++ Deoptimization::UnrollBlock:: +++ frame_sizes_offset_in_bytes())); +++ +++ // Counter +++ __ lwu(x13, Address(x14, +++ Deoptimization::UnrollBlock:: +++ number_of_frames_offset_in_bytes())); // (int) +++ +++ // Now adjust the caller's stack to make up for the extra locals but +++ // record the original sp so that we can save it in the skeletal +++ // interpreter frame and the stack walking of interpreter_sender +++ // will get the unextended sp value and not the "real" sp value. +++ +++ const Register sender_sp = t1; // temporary register +++ +++ __ lwu(x11, Address(x14, +++ Deoptimization::UnrollBlock:: +++ caller_adjustment_offset_in_bytes())); // (int) +++ __ mv(sender_sp, sp); +++ __ sub(sp, sp, x11); +++ +++ // Push interpreter frames in a loop +++ Label loop; +++ __ bind(loop); +++ __ ld(x11, Address(x15, 0)); // Load frame size +++ __ sub(x11, x11, 2 * wordSize); // We'll push pc and fp by hand +++ __ ld(ra, Address(x12, 0)); // Save return address +++ __ enter(); // and old fp & set new fp +++ __ sub(sp, sp, x11); // Prolog +++ __ sd(sender_sp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable +++ // This value is corrected by layout_activation_impl +++ __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize)); +++ __ mv(sender_sp, sp); // Pass sender_sp to next frame +++ __ add(x15, x15, wordSize); // Bump array pointer (sizes) +++ __ add(x12, x12, wordSize); // Bump array pointer (pcs) +++ __ subw(x13, x13, 1); // Decrement counter +++ __ bgtz(x13, loop); +++ __ ld(ra, Address(x12, 0)); // save final return address +++ // Re-push self-frame +++ __ enter(); // & old fp & set new fp +++ +++ // Use fp because the frames look interpreted now +++ // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP. +++ // Don't need the precise return PC here, just precise enough to point into this code blob. +++ address the_pc = __ pc(); +++ __ set_last_Java_frame(sp, fp, the_pc, t0); +++ +++ // Call C code. Need thread but NOT official VM entry +++ // crud. We cannot block on this call, no GC can happen. Call should +++ // restore return values to their stack-slots with the new SP. +++ // +++ // BasicType unpack_frames(JavaThread* thread, int exec_mode) +++ // +++ +++ // n.b. 2 gp args, 0 fp args, integral return type +++ +++ // sp should already be aligned +++ __ mv(c_rarg0, xthread); +++ __ mv(c_rarg1, Deoptimization::Unpack_uncommon_trap); +++ target = RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)); +++ __ relocate(target.rspec(), [&] { +++ int32_t offset; +++ __ la_patchable(t0, target, offset); +++ __ jalr(x1, t0, offset); +++ }); +++ +++ // Set an oopmap for the call site +++ // Use the same PC we used for the last java frame +++ oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0)); +++ +++ // Clear fp AND pc +++ __ reset_last_Java_frame(true); +++ +++ // Pop self-frame. 
+++ __ leave(); // Epilog
+++
+++ // Jump to interpreter
+++ __ ret();
+++
+++ // Make sure all code is generated
+++ masm->flush();
+++
+++ _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
+++ SimpleRuntimeFrame::framesize >> 1);
+++}
+++#endif // COMPILER2
+++
+++//------------------------------generate_handler_blob------
+++//
+++// Generate a special Compile2Runtime blob that saves all registers,
+++// and sets up an oopmap.
+++//
+++SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
+++ ResourceMark rm;
+++ OopMapSet *oop_maps = new OopMapSet();
+++ assert_cond(oop_maps != NULL);
+++ OopMap* map = NULL;
+++
+++ // Allocate space for the code. Setup code generation tools.
+++ CodeBuffer buffer("handler_blob", 2048, 1024);
+++ MacroAssembler* masm = new MacroAssembler(&buffer);
+++ assert_cond(masm != NULL);
+++
+++ address start = __ pc();
+++ address call_pc = NULL;
+++ int frame_size_in_words = -1;
+++ bool cause_return = (poll_type == POLL_AT_RETURN);
+++ RegisterSaver reg_saver(poll_type == POLL_AT_VECTOR_LOOP /* save_vectors */);
+++
+++ // Save Integer and Float registers.
+++ map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
+++
+++ // The following is basically a call_VM. However, we need the precise
+++ // address of the call in order to generate an oopmap. Hence, we do all the
+++ // work ourselves.
+++
+++ Label retaddr;
+++ __ set_last_Java_frame(sp, noreg, retaddr, t0);
+++
+++ // The return address must always be correct so that the frame constructor
+++ // never sees an invalid pc.
+++
+++ if (!cause_return) {
+++ // overwrite the return address pushed by save_live_registers
+++ // Additionally, x18 is a callee-saved register so we can look at
+++ // it later to determine if someone changed the return address for
+++ // us!
+++ __ ld(x18, Address(xthread, JavaThread::saved_exception_pc_offset()));
+++ __ sd(x18, Address(fp, frame::return_addr_offset * wordSize));
+++ }
+++
+++ // Do the call
+++ __ mv(c_rarg0, xthread);
+++ RuntimeAddress target(call_ptr);
+++ __ relocate(target.rspec(), [&] {
+++ int32_t offset;
+++ __ la_patchable(t0, target, offset);
+++ __ jalr(x1, t0, offset);
+++ });
+++ __ bind(retaddr);
+++
+++ // Set an oopmap for the call site. This oopmap will map all
+++ // oop-registers and debug-info registers as callee-saved. This
+++ // will allow deoptimization at this safepoint to find all possible
+++ // debug-info recordings, as well as let GC find all oops.
+++
+++ oop_maps->add_gc_map( __ pc() - start, map);
+++
+++ Label noException;
+++
+++ __ reset_last_Java_frame(false);
+++
+++ __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
+++
+++ __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
+++ __ beqz(t0, noException);
+++
+++ // Exception pending
+++
+++ reg_saver.restore_live_registers(masm);
+++
+++ __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+++
+++ // No exception case
+++ __ bind(noException);
+++
+++ Label no_adjust, bail;
+++ if (!cause_return) {
+++ // If our stashed return pc was modified by the runtime we avoid touching it
+++ __ ld(t0, Address(fp, frame::return_addr_offset * wordSize));
+++ __ bne(x18, t0, no_adjust);
+++
+++#ifdef ASSERT
+++ // Verify the correct encoding of the poll we're about to skip.
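+++ // The poll is a single 32-bit 'lwu zr, offset(rs1)': opcode 0b0000011 in
+++ // bits 0-6, rd == 0b00000 (zr) in bits 7-11, funct3 == 0b110 (LWU) in bits 12-14.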
+++ // See NativeInstruction::is_lwu_to_zr()
+++ __ lwu(t0, Address(x18));
+++ __ andi(t1, t0, 0b0000011);
+++ __ mv(t2, 0b0000011);
+++ __ bne(t1, t2, bail); // 0-6:0b0000011
+++ __ srli(t1, t0, 7);
+++ __ andi(t1, t1, 0b11111);
+++ __ bnez(t1, bail); // 7-11:0b00000
+++ __ srli(t1, t0, 12);
+++ __ andi(t1, t1, 0b110);
+++ __ mv(t2, 0b110);
+++ __ bne(t1, t2, bail); // 12-14:0b110
+++#endif
+++ // Adjust return pc forward to step over the safepoint poll instruction
+++ __ add(x18, x18, NativeInstruction::instruction_size);
+++ __ sd(x18, Address(fp, frame::return_addr_offset * wordSize));
+++ }
+++
+++ __ bind(no_adjust);
+++ // Normal exit, restore registers and exit.
+++
+++ reg_saver.restore_live_registers(masm);
+++ __ ret();
+++
+++#ifdef ASSERT
+++ __ bind(bail);
+++ __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
+++#endif
+++
+++ // Make sure all code is generated
+++ masm->flush();
+++
+++ // Fill-out other meta info
+++ return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
+++}
+++
+++//
+++// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
+++//
+++// Generate a stub that calls into the vm to find out the proper destination
+++// of a java call. All the argument registers are live at this point
+++// but since this is generic code we don't know what they are and the caller
+++// must do any gc of the args.
+++//
+++RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
+++ assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");
+++
+++ // allocate space for the code
+++ ResourceMark rm;
+++
+++ CodeBuffer buffer(name, 1000, 512);
+++ MacroAssembler* masm = new MacroAssembler(&buffer);
+++ assert_cond(masm != NULL);
+++
+++ int frame_size_in_words = -1;
+++ RegisterSaver reg_saver(false /* save_vectors */);
+++
+++ OopMapSet *oop_maps = new OopMapSet();
+++ assert_cond(oop_maps != NULL);
+++ OopMap* map = NULL;
+++
+++ int start = __ offset();
+++
+++ map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
+++
+++ int frame_complete = __ offset();
+++
+++ {
+++ Label retaddr;
+++ __ set_last_Java_frame(sp, noreg, retaddr, t0);
+++
+++ __ mv(c_rarg0, xthread);
+++ RuntimeAddress target(destination);
+++ __ relocate(target.rspec(), [&] {
+++ int32_t offset;
+++ __ la_patchable(t0, target, offset);
+++ __ jalr(x1, t0, offset);
+++ });
+++ __ bind(retaddr);
+++ }
+++
+++ // Set an oopmap for the call site.
+++ // We need this not only for callee-saved registers, but also for volatile
+++ // registers that the compiler might be keeping live across a safepoint.
+++
+++ oop_maps->add_gc_map( __ offset() - start, map);
+++
+++ // x10 contains the address we are going to jump to assuming no exception got installed
+++
+++ // clear last_Java_sp
+++ __ reset_last_Java_frame(false);
+++ // check for pending exceptions
+++ Label pending;
+++ __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
+++ __ bnez(t0, pending);
+++
+++ // get the returned Method*
+++ __ get_vm_result_2(xmethod, xthread);
+++ __ sd(xmethod, Address(sp, reg_saver.reg_offset_in_bytes(xmethod)));
+++
+++ // x10 is where we want to jump; overwrite t0, which is saved and temporary
+++ __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(t0)));
+++ reg_saver.restore_live_registers(masm);
+++
+++ // We are back to the original state on entry and ready to go.
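+++ // restore_live_registers() reloaded t0 from the slot we stored x10 into above,
+++ // so t0 now holds the resolved call destination.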
+++
+++ __ jr(t0);
+++
+++ // Pending exception after the safepoint
+++
+++ __ bind(pending);
+++
+++ reg_saver.restore_live_registers(masm);
+++
+++ // exception pending => remove activation and forward to exception handler
+++
+++ __ sd(zr, Address(xthread, JavaThread::vm_result_offset()));
+++
+++ __ ld(x10, Address(xthread, Thread::pending_exception_offset()));
+++ __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+++
+++ // -------------
+++ // make sure all code is generated
+++ masm->flush();
+++
+++ // return the blob
+++ return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
+++}
+++
+++#ifdef COMPILER2
+++RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
+++ int shadow_space_bytes,
+++ const GrowableArray<VMReg>& input_registers,
+++ const GrowableArray<VMReg>& output_registers) {
+++ Unimplemented();
+++ return nullptr;
+++}
+++
+++//------------------------------generate_exception_blob---------------------------
+++// creates exception blob at the end
+++// Using exception blob, this code is jumped from a compiled method.
+++// (see emit_exception_handler in riscv.ad file)
+++//
+++// Given an exception pc at a call we call into the runtime for the
+++// handler in this method. This handler might merely restore state
+++// (i.e. callee-save registers), unwind the frame, and jump to the
+++// exception handler for the nmethod if there is no Java-level handler
+++// for the nmethod.
+++//
+++// This code is entered with a jump.
+++//
+++// Arguments:
+++// x10: exception oop
+++// x13: exception pc
+++//
+++// Results:
+++// x10: exception oop
+++// x13: exception pc in caller
+++// destination: exception handler of caller
+++//
+++// Note: the exception pc MUST be at a call (precise debug information)
+++// Registers x10, x13, x12, x14, x15, t0 are not callee saved.
+++//
+++
+++void OptoRuntime::generate_exception_blob() {
+++ assert(!OptoRuntime::is_callee_saved_register(R13_num), "");
+++ assert(!OptoRuntime::is_callee_saved_register(R10_num), "");
+++ assert(!OptoRuntime::is_callee_saved_register(R12_num), "");
+++
+++ assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
+++
+++ // Allocate space for the code
+++ ResourceMark rm;
+++ // Setup code generation tools
+++ CodeBuffer buffer("exception_blob", 2048, 1024);
+++ MacroAssembler* masm = new MacroAssembler(&buffer);
+++ assert_cond(masm != NULL);
+++
+++ // TODO check various assumptions made here
+++ //
+++ // make sure we do so before running this
+++
+++ address start = __ pc();
+++
+++ // push fp and retaddr by hand
+++ // Exception pc is 'return address' for stack walker
+++ __ addi(sp, sp, -2 * wordSize);
+++ __ sd(ra, Address(sp, wordSize));
+++ __ sd(fp, Address(sp));
+++ // there are no callee-save registers and we don't expect an
+++ // arg reg save area
+++#ifndef PRODUCT
+++ assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
+++#endif
+++ // Store exception in Thread object. We cannot pass any arguments to the
+++ // handle_exception call, since we do not want to make any assumption
+++ // about the size of the frame in which the exception happened.
+++ __ sd(x10, Address(xthread, JavaThread::exception_oop_offset()));
+++ __ sd(x13, Address(xthread, JavaThread::exception_pc_offset()));
+++
+++ // This call does all the hard work. It checks if an exception handler
+++ // exists in the method.
+++ // If so, it returns the handler address.
+++ // If not, it prepares for stack-unwinding, restoring the callee-save
+++ // registers of the frame being removed.
+++ //
+++ // address OptoRuntime::handle_exception_C(JavaThread* thread)
+++ //
+++ // n.b. 1 gp arg, 0 fp args, integral return type
+++
+++ // the stack should always be aligned
+++ address the_pc = __ pc();
+++ __ set_last_Java_frame(sp, noreg, the_pc, t0);
+++ __ mv(c_rarg0, xthread);
+++ RuntimeAddress target(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C));
+++ __ relocate(target.rspec(), [&] {
+++ int32_t offset;
+++ __ la_patchable(t0, target, offset);
+++ __ jalr(x1, t0, offset);
+++ });
+++
+++ // handle_exception_C is a special VM call which does not require an explicit
+++ // instruction sync afterwards.
+++
+++ // Set an oopmap for the call site. This oopmap will only be used if we
+++ // are unwinding the stack. Hence, all locations will be dead.
+++ // Callee-saved registers will be the same as the frame above (i.e.,
+++ // handle_exception_stub), since they were restored when we got the
+++ // exception.
+++
+++ OopMapSet* oop_maps = new OopMapSet();
+++ assert_cond(oop_maps != NULL);
+++
+++ oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
+++
+++ __ reset_last_Java_frame(false);
+++
+++ // Restore callee-saved registers
+++
+++ // fp is an implicitly saved callee-saved register (i.e. the calling
+++ // convention will save/restore it in the prolog/epilog). Other than that
+++ // there are no callee-save registers now that adapter frames are gone,
+++ // and we don't expect an arg reg save area.
+++ __ ld(fp, Address(sp));
+++ __ ld(x13, Address(sp, wordSize));
+++ __ addi(sp, sp, 2 * wordSize);
+++
+++ // x10: exception handler
+++
+++ // We have a handler in x10 (could be deopt blob).
+++ __ mv(t0, x10);
+++
+++ // Get the exception oop
+++ __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
+++ // Get the exception pc in case we are deoptimized
+++ __ ld(x14, Address(xthread, JavaThread::exception_pc_offset()));
+++#ifdef ASSERT
+++ __ sd(zr, Address(xthread, JavaThread::exception_handler_pc_offset()));
+++ __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
+++#endif
+++ // Clear the exception oop so GC no longer processes it as a root.
+++ __ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));
+++
+++ // x10: exception oop
+++ // t0: exception handler
+++ // x14: exception pc
+++ // Jump to handler
+++
+++ __ jr(t0);
+++
+++ // Make sure all code is generated
+++ masm->flush();
+++
+++ // Set exception blob
+++ _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
+++}
+++#endif // COMPILER2
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
++@@ -0,0 +1,3959 @@
+++/*
+++ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
+++ * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.hpp" +++#include "asm/macroAssembler.inline.hpp" +++#include "compiler/oopMap.hpp" +++#include "gc/shared/barrierSet.hpp" +++#include "gc/shared/barrierSetAssembler.hpp" +++#include "interpreter/interpreter.hpp" +++#include "memory/universe.hpp" +++#include "nativeInst_riscv.hpp" +++#include "oops/instanceOop.hpp" +++#include "oops/method.hpp" +++#include "oops/objArrayKlass.hpp" +++#include "oops/oop.inline.hpp" +++#include "prims/methodHandles.hpp" +++#include "runtime/frame.inline.hpp" +++#include "runtime/handles.inline.hpp" +++#include "runtime/sharedRuntime.hpp" +++#include "runtime/stubCodeGenerator.hpp" +++#include "runtime/stubRoutines.hpp" +++#include "runtime/thread.inline.hpp" +++#include "utilities/align.hpp" +++#include "utilities/powerOfTwo.hpp" +++#ifdef COMPILER2 +++#include "opto/runtime.hpp" +++#endif +++#if INCLUDE_ZGC +++#include "gc/z/zThreadLocalData.hpp" +++#endif +++ +++// Declaration and definition of StubGenerator (no .hpp file). +++// For a more detailed description of the stub routine structure +++// see the comment in stubRoutines.hpp +++ +++#undef __ +++#define __ _masm-> +++ +++#ifdef PRODUCT +++#define BLOCK_COMMENT(str) /* nothing */ +++#else +++#define BLOCK_COMMENT(str) __ block_comment(str) +++#endif +++ +++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") +++ +++// Stub Code definitions +++ +++class StubGenerator: public StubCodeGenerator { +++ private: +++ +++#ifdef PRODUCT +++#define inc_counter_np(counter) ((void)0) +++#else +++ void inc_counter_np_(int& counter) { +++ __ la(t1, ExternalAddress((address)&counter)); +++ __ lwu(t0, Address(t1, 0)); +++ __ addiw(t0, t0, 1); +++ __ sw(t0, Address(t1, 0)); +++ } +++#define inc_counter_np(counter) \ +++ BLOCK_COMMENT("inc_counter " #counter); \ +++ inc_counter_np_(counter); +++#endif +++ +++ // Call stubs are used to call Java from C +++ // +++ // Arguments: +++ // c_rarg0: call wrapper address address +++ // c_rarg1: result address +++ // c_rarg2: result type BasicType +++ // c_rarg3: method Method* +++ // c_rarg4: (interpreter) entry point address +++ // c_rarg5: parameters intptr_t* +++ // c_rarg6: parameter size (in words) int +++ // c_rarg7: thread Thread* +++ // +++ // There is no return from the stub itself as any Java result +++ // is written to result +++ // +++ // we save x1 (ra) as the return PC at the base of the frame and +++ // link x8 (fp) below it as the frame pointer installing sp (x2) +++ // into fp. +++ // +++ // we save x10-x17, which accounts for all the c arguments. +++ // +++ // TODO: strictly do we need to save them all? they are treated as +++ // volatile by C so could we omit saving the ones we are going to +++ // place in global registers (thread? method?) or those we only use +++ // during setup of the Java call? +++ // +++ // we don't need to save x5 which C uses as an indirect result location +++ // return register. 
+++ // +++ // we don't need to save x6-x7 and x28-x31 which both C and Java treat as +++ // volatile +++ // +++ // we save x9, x18-x27, f8-f9, and f18-f27 which Java uses as temporary +++ // registers and C expects to be callee-save +++ // +++ // so the stub frame looks like this when we enter Java code +++ // +++ // [ return_from_Java ] <--- sp +++ // [ argument word n ] +++ // ... +++ // -34 [ argument word 1 ] +++ // -33 [ saved f27 ] <--- sp_after_call +++ // -32 [ saved f26 ] +++ // -31 [ saved f25 ] +++ // -30 [ saved f24 ] +++ // -29 [ saved f23 ] +++ // -28 [ saved f22 ] +++ // -27 [ saved f21 ] +++ // -26 [ saved f20 ] +++ // -25 [ saved f19 ] +++ // -24 [ saved f18 ] +++ // -23 [ saved f9 ] +++ // -22 [ saved f8 ] +++ // -21 [ saved x27 ] +++ // -20 [ saved x26 ] +++ // -19 [ saved x25 ] +++ // -18 [ saved x24 ] +++ // -17 [ saved x23 ] +++ // -16 [ saved x22 ] +++ // -15 [ saved x21 ] +++ // -14 [ saved x20 ] +++ // -13 [ saved x19 ] +++ // -12 [ saved x18 ] +++ // -11 [ saved x9 ] +++ // -10 [ call wrapper (x10) ] +++ // -9 [ result (x11) ] +++ // -8 [ result type (x12) ] +++ // -7 [ method (x13) ] +++ // -6 [ entry point (x14) ] +++ // -5 [ parameters (x15) ] +++ // -4 [ parameter size (x16) ] +++ // -3 [ thread (x17) ] +++ // -2 [ saved fp (x8) ] +++ // -1 [ saved ra (x1) ] +++ // 0 [ ] <--- fp == saved sp (x2) +++ +++ // Call stub stack layout word offsets from fp +++ enum call_stub_layout { +++ sp_after_call_off = -33, +++ +++ f27_off = -33, +++ f26_off = -32, +++ f25_off = -31, +++ f24_off = -30, +++ f23_off = -29, +++ f22_off = -28, +++ f21_off = -27, +++ f20_off = -26, +++ f19_off = -25, +++ f18_off = -24, +++ f9_off = -23, +++ f8_off = -22, +++ +++ x27_off = -21, +++ x26_off = -20, +++ x25_off = -19, +++ x24_off = -18, +++ x23_off = -17, +++ x22_off = -16, +++ x21_off = -15, +++ x20_off = -14, +++ x19_off = -13, +++ x18_off = -12, +++ x9_off = -11, +++ +++ call_wrapper_off = -10, +++ result_off = -9, +++ result_type_off = -8, +++ method_off = -7, +++ entry_point_off = -6, +++ parameters_off = -5, +++ parameter_size_off = -4, +++ thread_off = -3, +++ fp_f = -2, +++ retaddr_off = -1, +++ }; +++ +++ address generate_call_stub(address& return_address) { +++ assert((int)frame::entry_frame_after_call_words == -(int)sp_after_call_off + 1 && +++ (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, +++ "adjust this code"); +++ +++ StubCodeMark mark(this, "StubRoutines", "call_stub"); +++ address start = __ pc(); +++ +++ const Address sp_after_call (fp, sp_after_call_off * wordSize); +++ +++ const Address call_wrapper (fp, call_wrapper_off * wordSize); +++ const Address result (fp, result_off * wordSize); +++ const Address result_type (fp, result_type_off * wordSize); +++ const Address method (fp, method_off * wordSize); +++ const Address entry_point (fp, entry_point_off * wordSize); +++ const Address parameters (fp, parameters_off * wordSize); +++ const Address parameter_size(fp, parameter_size_off * wordSize); +++ +++ const Address thread (fp, thread_off * wordSize); +++ +++ const Address f27_save (fp, f27_off * wordSize); +++ const Address f26_save (fp, f26_off * wordSize); +++ const Address f25_save (fp, f25_off * wordSize); +++ const Address f24_save (fp, f24_off * wordSize); +++ const Address f23_save (fp, f23_off * wordSize); +++ const Address f22_save (fp, f22_off * wordSize); +++ const Address f21_save (fp, f21_off * wordSize); +++ const Address f20_save (fp, f20_off * wordSize); +++ const Address f19_save (fp, f19_off * wordSize); +++ const Address 
f18_save (fp, f18_off * wordSize); +++ const Address f9_save (fp, f9_off * wordSize); +++ const Address f8_save (fp, f8_off * wordSize); +++ +++ const Address x27_save (fp, x27_off * wordSize); +++ const Address x26_save (fp, x26_off * wordSize); +++ const Address x25_save (fp, x25_off * wordSize); +++ const Address x24_save (fp, x24_off * wordSize); +++ const Address x23_save (fp, x23_off * wordSize); +++ const Address x22_save (fp, x22_off * wordSize); +++ const Address x21_save (fp, x21_off * wordSize); +++ const Address x20_save (fp, x20_off * wordSize); +++ const Address x19_save (fp, x19_off * wordSize); +++ const Address x18_save (fp, x18_off * wordSize); +++ +++ const Address x9_save (fp, x9_off * wordSize); +++ +++ // stub code +++ +++ address riscv_entry = __ pc(); +++ +++ // set up frame and move sp to end of save area +++ __ enter(); +++ __ addi(sp, fp, sp_after_call_off * wordSize); +++ +++ // save register parameters and Java temporary/global registers +++ // n.b. we save thread even though it gets installed in +++ // xthread because we want to sanity check tp later +++ __ sd(c_rarg7, thread); +++ __ sw(c_rarg6, parameter_size); +++ __ sd(c_rarg5, parameters); +++ __ sd(c_rarg4, entry_point); +++ __ sd(c_rarg3, method); +++ __ sd(c_rarg2, result_type); +++ __ sd(c_rarg1, result); +++ __ sd(c_rarg0, call_wrapper); +++ +++ __ sd(x9, x9_save); +++ +++ __ sd(x18, x18_save); +++ __ sd(x19, x19_save); +++ __ sd(x20, x20_save); +++ __ sd(x21, x21_save); +++ __ sd(x22, x22_save); +++ __ sd(x23, x23_save); +++ __ sd(x24, x24_save); +++ __ sd(x25, x25_save); +++ __ sd(x26, x26_save); +++ __ sd(x27, x27_save); +++ +++ __ fsd(f8, f8_save); +++ __ fsd(f9, f9_save); +++ __ fsd(f18, f18_save); +++ __ fsd(f19, f19_save); +++ __ fsd(f20, f20_save); +++ __ fsd(f21, f21_save); +++ __ fsd(f22, f22_save); +++ __ fsd(f23, f23_save); +++ __ fsd(f24, f24_save); +++ __ fsd(f25, f25_save); +++ __ fsd(f26, f26_save); +++ __ fsd(f27, f27_save); +++ +++ // install Java thread in global register now we have saved +++ // whatever value it held +++ __ mv(xthread, c_rarg7); +++ +++ // And method +++ __ mv(xmethod, c_rarg3); +++ +++ // set up the heapbase register +++ __ reinit_heapbase(); +++ +++#ifdef ASSERT +++ // make sure we have no pending exceptions +++ { +++ Label L; +++ __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset()))); +++ __ beqz(t0, L); +++ __ stop("StubRoutines::call_stub: entered with pending exception"); +++ __ BIND(L); +++ } +++#endif +++ // pass parameters if any +++ __ mv(esp, sp); +++ __ slli(t0, c_rarg6, LogBytesPerWord); +++ __ sub(t0, sp, t0); // Move SP out of the way +++ __ andi(sp, t0, -2 * wordSize); +++ +++ BLOCK_COMMENT("pass parameters if any"); +++ Label parameters_done; +++ // parameter count is still in c_rarg6 +++ // and parameter pointer identifying param 1 is in c_rarg5 +++ __ beqz(c_rarg6, parameters_done); +++ +++ address loop = __ pc(); +++ __ ld(t0, Address(c_rarg5, 0)); +++ __ addi(c_rarg5, c_rarg5, wordSize); +++ __ addi(c_rarg6, c_rarg6, -1); +++ __ push_reg(t0); +++ __ bgtz(c_rarg6, loop); +++ +++ __ BIND(parameters_done); +++ +++ // call Java entry -- passing methdoOop, and current sp +++ // xmethod: Method* +++ // x30: sender sp +++ BLOCK_COMMENT("call Java function"); +++ __ mv(x30, sp); +++ __ jalr(c_rarg4); +++ +++ // save current address for use by exception handling code +++ +++ return_address = __ pc(); +++ +++ // store result depending on type (everything that is not +++ // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) 
+++ // n.b. this assumes Java returns an integral result in x10 +++ // and a floating result in j_farg0 +++ __ ld(j_rarg2, result); +++ Label is_long, is_float, is_double, exit; +++ __ ld(j_rarg1, result_type); +++ __ mv(t0, (u1)T_OBJECT); +++ __ beq(j_rarg1, t0, is_long); +++ __ mv(t0, (u1)T_LONG); +++ __ beq(j_rarg1, t0, is_long); +++ __ mv(t0, (u1)T_FLOAT); +++ __ beq(j_rarg1, t0, is_float); +++ __ mv(t0, (u1)T_DOUBLE); +++ __ beq(j_rarg1, t0, is_double); +++ +++ // handle T_INT case +++ __ sw(x10, Address(j_rarg2)); +++ +++ __ BIND(exit); +++ +++ // pop parameters +++ __ addi(esp, fp, sp_after_call_off * wordSize); +++ +++#ifdef ASSERT +++ // verify that threads correspond +++ { +++ Label L, S; +++ __ ld(t0, thread); +++ __ bne(xthread, t0, S); +++ __ get_thread(t0); +++ __ beq(xthread, t0, L); +++ __ BIND(S); +++ __ stop("StubRoutines::call_stub: threads must correspond"); +++ __ BIND(L); +++ } +++#endif +++ +++ // restore callee-save registers +++ __ fld(f27, f27_save); +++ __ fld(f26, f26_save); +++ __ fld(f25, f25_save); +++ __ fld(f24, f24_save); +++ __ fld(f23, f23_save); +++ __ fld(f22, f22_save); +++ __ fld(f21, f21_save); +++ __ fld(f20, f20_save); +++ __ fld(f19, f19_save); +++ __ fld(f18, f18_save); +++ __ fld(f9, f9_save); +++ __ fld(f8, f8_save); +++ +++ __ ld(x27, x27_save); +++ __ ld(x26, x26_save); +++ __ ld(x25, x25_save); +++ __ ld(x24, x24_save); +++ __ ld(x23, x23_save); +++ __ ld(x22, x22_save); +++ __ ld(x21, x21_save); +++ __ ld(x20, x20_save); +++ __ ld(x19, x19_save); +++ __ ld(x18, x18_save); +++ +++ __ ld(x9, x9_save); +++ +++ __ ld(c_rarg0, call_wrapper); +++ __ ld(c_rarg1, result); +++ __ ld(c_rarg2, result_type); +++ __ ld(c_rarg3, method); +++ __ ld(c_rarg4, entry_point); +++ __ ld(c_rarg5, parameters); +++ __ ld(c_rarg6, parameter_size); +++ __ ld(c_rarg7, thread); +++ +++ // leave frame and return to caller +++ __ leave(); +++ __ ret(); +++ +++ // handle return types different from T_INT +++ +++ __ BIND(is_long); +++ __ sd(x10, Address(j_rarg2, 0)); +++ __ j(exit); +++ +++ __ BIND(is_float); +++ __ fsw(j_farg0, Address(j_rarg2, 0), t0); +++ __ j(exit); +++ +++ __ BIND(is_double); +++ __ fsd(j_farg0, Address(j_rarg2, 0), t0); +++ __ j(exit); +++ +++ return start; +++ } +++ +++ // Return point for a Java call if there's an exception thrown in +++ // Java code. The exception is caught and transformed into a +++ // pending exception stored in JavaThread that can be tested from +++ // within the VM. +++ // +++ // Note: Usually the parameters are removed by the callee. In case +++ // of an exception crossing an activation frame boundary, that is +++ // not the case if the callee is compiled code => need to setup the +++ // sp. 
+++ // +++ // x10: exception oop +++ +++ address generate_catch_exception() { +++ StubCodeMark mark(this, "StubRoutines", "catch_exception"); +++ address start = __ pc(); +++ +++ // same as in generate_call_stub(): +++ const Address thread(fp, thread_off * wordSize); +++ +++#ifdef ASSERT +++ // verify that threads correspond +++ { +++ Label L, S; +++ __ ld(t0, thread); +++ __ bne(xthread, t0, S); +++ __ get_thread(t0); +++ __ beq(xthread, t0, L); +++ __ bind(S); +++ __ stop("StubRoutines::catch_exception: threads must correspond"); +++ __ bind(L); +++ } +++#endif +++ +++ // set pending exception +++ __ verify_oop(x10); +++ +++ __ sd(x10, Address(xthread, Thread::pending_exception_offset())); +++ __ mv(t0, (address)__FILE__); +++ __ sd(t0, Address(xthread, Thread::exception_file_offset())); +++ __ mv(t0, (int)__LINE__); +++ __ sw(t0, Address(xthread, Thread::exception_line_offset())); +++ +++ // complete return to VM +++ assert(StubRoutines::_call_stub_return_address != NULL, +++ "_call_stub_return_address must have been generated before"); +++ __ j(StubRoutines::_call_stub_return_address); +++ +++ return start; +++ } +++ +++ // Continuation point for runtime calls returning with a pending +++ // exception. The pending exception check happened in the runtime +++ // or native call stub. The pending exception in Thread is +++ // converted into a Java-level exception. +++ // +++ // Contract with Java-level exception handlers: +++ // x10: exception +++ // x13: throwing pc +++ // +++ // NOTE: At entry of this stub, exception-pc must be in RA !! +++ +++ // NOTE: this is always used as a jump target within generated code +++ // so it just needs to be generated code with no x86 prolog +++ +++ address generate_forward_exception() { +++ StubCodeMark mark(this, "StubRoutines", "forward exception"); +++ address start = __ pc(); +++ +++ // Upon entry, RA points to the return address returning into +++ // Java (interpreted or compiled) code; i.e., the return address +++ // becomes the throwing pc. +++ // +++ // Arguments pushed before the runtime call are still on the stack +++ // but the exception handler will reset the stack pointer -> +++ // ignore them. A potential result in registers can be ignored as +++ // well. +++ +++#ifdef ASSERT +++ // make sure this code is only executed if there is a pending exception +++ { +++ Label L; +++ __ ld(t0, Address(xthread, Thread::pending_exception_offset())); +++ __ bnez(t0, L); +++ __ stop("StubRoutines::forward exception: no pending exception (1)"); +++ __ bind(L); +++ } +++#endif +++ +++ // compute exception handler into x9 +++ +++ // call the VM to find the handler address associated with the +++ // caller address. pass thread in x10 and caller pc (ret address) +++ // in x11. n.b. the caller pc is in ra, unlike x86 where it is on +++ // the stack. +++ __ mv(c_rarg1, ra); +++ // ra will be trashed by the VM call so we move it to x9 +++ // (callee-saved) because we also need to pass it to the handler +++ // returned by this call. +++ __ mv(x9, ra); +++ BLOCK_COMMENT("call exception_handler_for_return_address"); +++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, +++ SharedRuntime::exception_handler_for_return_address), +++ xthread, c_rarg1); +++ // we should not really care that ra is no longer the callee +++ // address. we saved the value the handler needs in x9 so we can +++ // just copy it to x13. 
however, the C2 handler will push its own +++ // frame and then calls into the VM and the VM code asserts that +++ // the PC for the frame above the handler belongs to a compiled +++ // Java method. So, we restore ra here to satisfy that assert. +++ __ mv(ra, x9); +++ // setup x10 & x13 & clear pending exception +++ __ mv(x13, x9); +++ __ mv(x9, x10); +++ __ ld(x10, Address(xthread, Thread::pending_exception_offset())); +++ __ sd(zr, Address(xthread, Thread::pending_exception_offset())); +++ +++#ifdef ASSERT +++ // make sure exception is set +++ { +++ Label L; +++ __ bnez(x10, L); +++ __ stop("StubRoutines::forward exception: no pending exception (2)"); +++ __ bind(L); +++ } +++#endif +++ +++ // continue at exception handler +++ // x10: exception +++ // x13: throwing pc +++ // x9: exception handler +++ __ verify_oop(x10); +++ __ jr(x9); +++ +++ return start; +++ } +++ +++ // Non-destructive plausibility checks for oops +++ // +++ // Arguments: +++ // x10: oop to verify +++ // t0: error message +++ // +++ // Stack after saving c_rarg3: +++ // [tos + 0]: saved c_rarg3 +++ // [tos + 1]: saved c_rarg2 +++ // [tos + 2]: saved ra +++ // [tos + 3]: saved t1 +++ // [tos + 4]: saved x10 +++ // [tos + 5]: saved t0 +++ address generate_verify_oop() { +++ +++ StubCodeMark mark(this, "StubRoutines", "verify_oop"); +++ address start = __ pc(); +++ +++ Label exit, error; +++ +++ __ push_reg(RegSet::of(c_rarg2, c_rarg3), sp); // save c_rarg2 and c_rarg3 +++ +++ __ la(c_rarg2, ExternalAddress((address) StubRoutines::verify_oop_count_addr())); +++ __ ld(c_rarg3, Address(c_rarg2)); +++ __ add(c_rarg3, c_rarg3, 1); +++ __ sd(c_rarg3, Address(c_rarg2)); +++ +++ // object is in x10 +++ // make sure object is 'reasonable' +++ __ beqz(x10, exit); // if obj is NULL it is OK +++ +++#if INCLUDE_ZGC +++ if (UseZGC) { +++ // Check if mask is good. +++ // verifies that ZAddressBadMask & x10 == 0 +++ __ ld(c_rarg3, Address(xthread, ZThreadLocalData::address_bad_mask_offset())); +++ __ andr(c_rarg2, x10, c_rarg3); +++ __ bnez(c_rarg2, error); +++ } +++#endif +++ +++ // Check if the oop is in the right area of memory +++ __ mv(c_rarg3, (intptr_t) Universe::verify_oop_mask()); +++ __ andr(c_rarg2, x10, c_rarg3); +++ __ mv(c_rarg3, (intptr_t) Universe::verify_oop_bits()); +++ +++ // Compare c_rarg2 and c_rarg3. +++ __ bne(c_rarg2, c_rarg3, error); +++ +++ // make sure klass is 'reasonable', which is not zero. +++ __ load_klass(x10, x10); // get klass +++ __ beqz(x10, error); // if klass is NULL it is broken +++ +++ // return if everything seems ok +++ __ bind(exit); +++ +++ __ pop_reg(RegSet::of(c_rarg2, c_rarg3), sp); // pop c_rarg2 and c_rarg3 +++ __ ret(); +++ +++ // handle errors +++ __ bind(error); +++ __ pop_reg(RegSet::of(c_rarg2, c_rarg3), sp); // pop c_rarg2 and c_rarg3 +++ +++ __ push_reg(RegSet::range(x0, x31), sp); +++ // debug(char* msg, int64_t pc, int64_t regs[]) +++ __ mv(c_rarg0, t0); // pass address of error message +++ __ mv(c_rarg1, ra); // pass return address +++ __ mv(c_rarg2, sp); // pass address of regs on stack +++#ifndef PRODUCT +++ assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area"); +++#endif +++ BLOCK_COMMENT("call MacroAssembler::debug"); +++ __ call(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)); +++ __ ebreak(); +++ +++ return start; +++ } +++ +++ // The inner part of zero_words(). +++ // +++ // Inputs: +++ // x28: the HeapWord-aligned base address of an array to zero. +++ // x29: the count in HeapWords, x29 > 0. 
+++ //
+++ // Returns x28 and x29, adjusted for the caller to clear.
+++ // x28: the base address of the tail of words left to clear.
+++ // x29: the number of words in the tail.
+++ // x29 < MacroAssembler::zero_words_block_size.
+++
+++ address generate_zero_blocks() {
+++ Label done;
+++
+++ const Register base = x28, cnt = x29;
+++
+++ __ align(CodeEntryAlignment);
+++ StubCodeMark mark(this, "StubRoutines", "zero_blocks");
+++ address start = __ pc();
+++
+++ {
+++ // Clear the remaining blocks.
+++ Label loop;
+++ __ sub(cnt, cnt, MacroAssembler::zero_words_block_size);
+++ __ bltz(cnt, done);
+++ __ bind(loop);
+++ for (int i = 0; i < MacroAssembler::zero_words_block_size; i++) {
+++ __ sd(zr, Address(base, 0));
+++ __ add(base, base, 8);
+++ }
+++ __ sub(cnt, cnt, MacroAssembler::zero_words_block_size);
+++ __ bgez(cnt, loop);
+++ __ bind(done);
+++ __ add(cnt, cnt, MacroAssembler::zero_words_block_size);
+++ }
+++
+++ __ ret();
+++
+++ return start;
+++ }
+++
+++ typedef enum {
+++ copy_forwards = 1,
+++ copy_backwards = -1
+++ } copy_direction;
+++
+++ // Bulk copy of blocks of 8 words.
+++ //
+++ // count is a count of words.
+++ //
+++ // Precondition: count >= 8
+++ //
+++ // Postconditions:
+++ //
+++ // The least significant bit of count contains the remaining count
+++ // of words to copy. The rest of count is trash.
+++ //
+++ // s and d are adjusted to point to the remaining words to copy
+++ //
+++ void generate_copy_longs(Label &start, Register s, Register d, Register count,
+++ copy_direction direction) {
+++ int unit = wordSize * direction;
+++ int bias = wordSize;
+++
+++ const Register tmp_reg0 = x13, tmp_reg1 = x14, tmp_reg2 = x15, tmp_reg3 = x16,
+++ tmp_reg4 = x17, tmp_reg5 = x7, tmp_reg6 = x28, tmp_reg7 = x29;
+++
+++ const Register stride = x30;
+++
+++ assert_different_registers(t0, tmp_reg0, tmp_reg1, tmp_reg2, tmp_reg3,
+++ tmp_reg4, tmp_reg5, tmp_reg6, tmp_reg7);
+++ assert_different_registers(s, d, count, t0);
+++
+++ Label again, drain;
+++ const char* stub_name = NULL;
+++ if (direction == copy_forwards) {
+++ stub_name = "forward_copy_longs";
+++ } else {
+++ stub_name = "backward_copy_longs";
+++ }
+++ StubCodeMark mark(this, "StubRoutines", stub_name);
+++ __ align(CodeEntryAlignment);
+++ __ bind(start);
+++
+++ if (direction == copy_forwards) {
+++ __ sub(s, s, bias);
+++ __ sub(d, d, bias);
+++ }
+++
+++#ifdef ASSERT
+++ // Make sure we are never given < 8 words
+++ {
+++ Label L;
+++
+++ __ mv(t0, 8);
+++ __ bge(count, t0, L);
+++ __ stop("generate_copy_longs called with < 8 words");
+++ __ bind(L);
+++ }
+++#endif
+++
+++ __ ld(tmp_reg0, Address(s, 1 * unit));
+++ __ ld(tmp_reg1, Address(s, 2 * unit));
+++ __ ld(tmp_reg2, Address(s, 3 * unit));
+++ __ ld(tmp_reg3, Address(s, 4 * unit));
+++ __ ld(tmp_reg4, Address(s, 5 * unit));
+++ __ ld(tmp_reg5, Address(s, 6 * unit));
+++ __ ld(tmp_reg6, Address(s, 7 * unit));
+++ __ ld(tmp_reg7, Address(s, 8 * unit));
+++ __ addi(s, s, 8 * unit);
+++
+++ __ sub(count, count, 16);
+++ __ bltz(count, drain);
+++
+++ __ bind(again);
+++
+++ __ sd(tmp_reg0, Address(d, 1 * unit));
+++ __ sd(tmp_reg1, Address(d, 2 * unit));
+++ __ sd(tmp_reg2, Address(d, 3 * unit));
+++ __ sd(tmp_reg3, Address(d, 4 * unit));
+++ __ sd(tmp_reg4, Address(d, 5 * unit));
+++ __ sd(tmp_reg5, Address(d, 6 * unit));
+++ __ sd(tmp_reg6, Address(d, 7 * unit));
+++ __ sd(tmp_reg7, Address(d, 8 * unit));
+++
+++ __ ld(tmp_reg0, Address(s, 1 * unit));
+++ __ ld(tmp_reg1, Address(s, 2 * unit));
+++ __ ld(tmp_reg2, Address(s, 3 * unit));
+++ __
ld(tmp_reg3, Address(s, 4 * unit)); +++ __ ld(tmp_reg4, Address(s, 5 * unit)); +++ __ ld(tmp_reg5, Address(s, 6 * unit)); +++ __ ld(tmp_reg6, Address(s, 7 * unit)); +++ __ ld(tmp_reg7, Address(s, 8 * unit)); +++ +++ __ addi(s, s, 8 * unit); +++ __ addi(d, d, 8 * unit); +++ +++ __ sub(count, count, 8); +++ __ bgez(count, again); +++ +++ // Drain +++ __ bind(drain); +++ +++ __ sd(tmp_reg0, Address(d, 1 * unit)); +++ __ sd(tmp_reg1, Address(d, 2 * unit)); +++ __ sd(tmp_reg2, Address(d, 3 * unit)); +++ __ sd(tmp_reg3, Address(d, 4 * unit)); +++ __ sd(tmp_reg4, Address(d, 5 * unit)); +++ __ sd(tmp_reg5, Address(d, 6 * unit)); +++ __ sd(tmp_reg6, Address(d, 7 * unit)); +++ __ sd(tmp_reg7, Address(d, 8 * unit)); +++ __ addi(d, d, 8 * unit); +++ +++ { +++ Label L1, L2; +++ __ test_bit(t0, count, 2); +++ __ beqz(t0, L1); +++ +++ __ ld(tmp_reg0, Address(s, 1 * unit)); +++ __ ld(tmp_reg1, Address(s, 2 * unit)); +++ __ ld(tmp_reg2, Address(s, 3 * unit)); +++ __ ld(tmp_reg3, Address(s, 4 * unit)); +++ __ addi(s, s, 4 * unit); +++ +++ __ sd(tmp_reg0, Address(d, 1 * unit)); +++ __ sd(tmp_reg1, Address(d, 2 * unit)); +++ __ sd(tmp_reg2, Address(d, 3 * unit)); +++ __ sd(tmp_reg3, Address(d, 4 * unit)); +++ __ addi(d, d, 4 * unit); +++ +++ __ bind(L1); +++ +++ if (direction == copy_forwards) { +++ __ addi(s, s, bias); +++ __ addi(d, d, bias); +++ } +++ +++ __ test_bit(t0, count, 1); +++ __ beqz(t0, L2); +++ if (direction == copy_backwards) { +++ __ addi(s, s, 2 * unit); +++ __ ld(tmp_reg0, Address(s)); +++ __ ld(tmp_reg1, Address(s, wordSize)); +++ __ addi(d, d, 2 * unit); +++ __ sd(tmp_reg0, Address(d)); +++ __ sd(tmp_reg1, Address(d, wordSize)); +++ } else { +++ __ ld(tmp_reg0, Address(s)); +++ __ ld(tmp_reg1, Address(s, wordSize)); +++ __ addi(s, s, 2 * unit); +++ __ sd(tmp_reg0, Address(d)); +++ __ sd(tmp_reg1, Address(d, wordSize)); +++ __ addi(d, d, 2 * unit); +++ } +++ __ bind(L2); +++ } +++ +++ __ ret(); +++ } +++ +++ Label copy_f, copy_b; +++ +++ // All-singing all-dancing memory copy. +++ // +++ // Copy count units of memory from s to d. The size of a unit is +++ // step, which can be positive or negative depending on the direction +++ // of copy. If is_aligned is false, we align the source address. 
+++ // +++ /* +++ * if (is_aligned) { +++ * if (count >= 32) +++ * goto copy32_loop; +++ * if (count >= 8) +++ * goto copy8_loop; +++ * goto copy_small; +++ * } +++ * bool is_backwards = step < 0; +++ * int granularity = uabs(step); +++ * count = count * granularity; * count bytes +++ * +++ * if (is_backwards) { +++ * s += count; +++ * d += count; +++ * } +++ * +++ * count limit maybe greater than 16, for better performance +++ * if (count < 16) { +++ * goto copy_small; +++ * } +++ * +++ * if ((dst % 8) == (src % 8)) { +++ * aligned; +++ * goto copy_big; +++ * } +++ * +++ * copy_big: +++ * if the amount to copy is more than (or equal to) 32 bytes goto copy32_loop +++ * else goto copy8_loop +++ * copy_small: +++ * load element one by one; +++ * done; +++ */ +++ +++ typedef void (MacroAssembler::*copy_insn)(Register Rd, const Address &adr, Register temp); +++ +++ void copy_memory_v(Register s, Register d, Register count, Register tmp, int step) { +++ bool is_backward = step < 0; +++ int granularity = uabs(step); +++ +++ const Register src = x30, dst = x31, vl = x14, cnt = x15, tmp1 = x16, tmp2 = x17; +++ assert_different_registers(s, d, cnt, vl, tmp, tmp1, tmp2); +++ Assembler::SEW sew = Assembler::elembytes_to_sew(granularity); +++ Label loop_forward, loop_backward, done; +++ +++ __ mv(dst, d); +++ __ mv(src, s); +++ __ mv(cnt, count); +++ +++ __ bind(loop_forward); +++ __ vsetvli(vl, cnt, sew, Assembler::m8); +++ if (is_backward) { +++ __ bne(vl, cnt, loop_backward); +++ } +++ +++ __ vlex_v(v0, src, sew); +++ __ sub(cnt, cnt, vl); +++ __ slli(vl, vl, (int)sew); +++ __ add(src, src, vl); +++ +++ __ vsex_v(v0, dst, sew); +++ __ add(dst, dst, vl); +++ __ bnez(cnt, loop_forward); +++ +++ if (is_backward) { +++ __ j(done); +++ +++ __ bind(loop_backward); +++ __ sub(tmp, cnt, vl); +++ __ slli(tmp, tmp, sew); +++ __ add(tmp1, s, tmp); +++ __ vlex_v(v0, tmp1, sew); +++ __ add(tmp2, d, tmp); +++ __ vsex_v(v0, tmp2, sew); +++ __ sub(cnt, cnt, vl); +++ __ bnez(cnt, loop_forward); +++ __ bind(done); +++ } +++ } +++ +++ void copy_memory(bool is_aligned, Register s, Register d, +++ Register count, Register tmp, int step) { +++ if (UseRVV) { +++ return copy_memory_v(s, d, count, tmp, step); +++ } +++ +++ bool is_backwards = step < 0; +++ int granularity = uabs(step); +++ +++ const Register src = x30, dst = x31, cnt = x15, tmp3 = x16, tmp4 = x17, tmp5 = x14, tmp6 = x13; +++ +++ Label same_aligned; +++ Label copy_big, copy32_loop, copy8_loop, copy_small, done; +++ +++ copy_insn ld_arr = NULL, st_arr = NULL; +++ switch (granularity) { +++ case 1 : +++ ld_arr = (copy_insn)&MacroAssembler::lbu; +++ st_arr = (copy_insn)&MacroAssembler::sb; +++ break; +++ case 2 : +++ ld_arr = (copy_insn)&MacroAssembler::lhu; +++ st_arr = (copy_insn)&MacroAssembler::sh; +++ break; +++ case 4 : +++ ld_arr = (copy_insn)&MacroAssembler::lwu; +++ st_arr = (copy_insn)&MacroAssembler::sw; +++ break; +++ case 8 : +++ ld_arr = (copy_insn)&MacroAssembler::ld; +++ st_arr = (copy_insn)&MacroAssembler::sd; +++ break; +++ default : +++ ShouldNotReachHere(); +++ } +++ +++ __ beqz(count, done); +++ __ slli(cnt, count, exact_log2(granularity)); +++ if (is_backwards) { +++ __ add(src, s, cnt); +++ __ add(dst, d, cnt); +++ } else { +++ __ mv(src, s); +++ __ mv(dst, d); +++ } +++ +++ if (is_aligned) { +++ __ addi(tmp, cnt, -32); +++ __ bgez(tmp, copy32_loop); +++ __ addi(tmp, cnt, -8); +++ __ bgez(tmp, copy8_loop); +++ __ j(copy_small); +++ } else { +++ __ mv(tmp, 16); +++ __ blt(cnt, tmp, copy_small); +++ +++ __ xorr(tmp, src, dst); +++ __ 
andi(tmp, tmp, 0b111);
+++ __ bnez(tmp, copy_small);
+++
+++ __ bind(same_aligned);
+++ __ andi(tmp, src, 0b111);
+++ __ beqz(tmp, copy_big);
+++ if (is_backwards) {
+++ __ addi(src, src, step);
+++ __ addi(dst, dst, step);
+++ }
+++ (_masm->*ld_arr)(tmp3, Address(src), t0);
+++ (_masm->*st_arr)(tmp3, Address(dst), t0);
+++ if (!is_backwards) {
+++ __ addi(src, src, step);
+++ __ addi(dst, dst, step);
+++ }
+++ __ addi(cnt, cnt, -granularity);
+++ __ beqz(cnt, done);
+++ __ j(same_aligned);
+++
+++ __ bind(copy_big);
+++ __ mv(tmp, 32);
+++ __ blt(cnt, tmp, copy8_loop);
+++ }
+++ __ bind(copy32_loop);
+++ if (is_backwards) {
+++ __ addi(src, src, -wordSize * 4);
+++ __ addi(dst, dst, -wordSize * 4);
+++ }
+++ // we first load 32 bytes, then write them, so the direction here doesn't matter
+++ __ ld(tmp3, Address(src));
+++ __ ld(tmp4, Address(src, 8));
+++ __ ld(tmp5, Address(src, 16));
+++ __ ld(tmp6, Address(src, 24));
+++ __ sd(tmp3, Address(dst));
+++ __ sd(tmp4, Address(dst, 8));
+++ __ sd(tmp5, Address(dst, 16));
+++ __ sd(tmp6, Address(dst, 24));
+++
+++ if (!is_backwards) {
+++ __ addi(src, src, wordSize * 4);
+++ __ addi(dst, dst, wordSize * 4);
+++ }
+++ __ addi(tmp, cnt, -(32 + wordSize * 4));
+++ __ addi(cnt, cnt, -wordSize * 4);
+++ __ bgez(tmp, copy32_loop); // cnt >= 32, do next loop
+++
+++ __ beqz(cnt, done); // if that's all - done
+++
+++ __ addi(tmp, cnt, -8); // if not - copy the remainder
+++ __ bltz(tmp, copy_small); // cnt < 8, go to copy_small, else fall through to copy8_loop
+++
+++ __ bind(copy8_loop);
+++ if (is_backwards) {
+++ __ addi(src, src, -wordSize);
+++ __ addi(dst, dst, -wordSize);
+++ }
+++ __ ld(tmp3, Address(src));
+++ __ sd(tmp3, Address(dst));
+++ if (!is_backwards) {
+++ __ addi(src, src, wordSize);
+++ __ addi(dst, dst, wordSize);
+++ }
+++ __ addi(tmp, cnt, -(8 + wordSize));
+++ __ addi(cnt, cnt, -wordSize);
+++ __ bgez(tmp, copy8_loop); // cnt >= 8, do next loop
+++
+++ __ beqz(cnt, done); // if that's all - done
+++
+++ __ bind(copy_small);
+++ if (is_backwards) {
+++ __ addi(src, src, step);
+++ __ addi(dst, dst, step);
+++ }
+++ (_masm->*ld_arr)(tmp3, Address(src), t0);
+++ (_masm->*st_arr)(tmp3, Address(dst), t0);
+++ if (!is_backwards) {
+++ __ addi(src, src, step);
+++ __ addi(dst, dst, step);
+++ }
+++ __ addi(cnt, cnt, -granularity);
+++ __ bgtz(cnt, copy_small);
+++
+++ __ bind(done);
+++ }
+++
+++ // Scan over array at a for count oops, verifying each one.
+++ // Preserves a and count, clobbers t0 and t1.
+++ void verify_oop_array(size_t size, Register a, Register count, Register temp) {
+++ Label loop, end;
+++ __ mv(t1, zr);
+++ __ slli(t0, count, exact_log2(size));
+++ __ bind(loop);
+++ __ bgeu(t1, t0, end);
+++
+++ __ add(temp, a, t1);
+++ if (size == (size_t)wordSize) {
+++ __ ld(temp, Address(temp, 0));
+++ __ verify_oop(temp);
+++ } else {
+++ __ lwu(temp, Address(temp, 0));
+++ __ decode_heap_oop(temp); // calls verify_oop
+++ }
+++ __ add(t1, t1, size);
+++ __ j(loop);
+++ __ bind(end);
+++ }
+++
+++ // Arguments:
+++ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
+++ // ignored
+++ // is_oop - true => oop array, so generate store check code
+++ // name - stub name string
+++ //
+++ // Inputs:
+++ // c_rarg0 - source array address
+++ // c_rarg1 - destination array address
+++ // c_rarg2 - element count, treated as ssize_t, can be zero
+++ //
+++ // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
+++ // the hardware handle it.
The two dwords within qwords that span
+++ // cache line boundaries will still be loaded and stored atomically.
+++ //
+++ // Side Effects:
+++ // disjoint_int_copy_entry is set to the no-overlap entry point
+++ // used by generate_conjoint_int_oop_copy().
+++ //
+++ address generate_disjoint_copy(size_t size, bool aligned, bool is_oop, address* entry,
+++ const char* name, bool dest_uninitialized = false) {
+++ const Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
+++ RegSet saved_reg = RegSet::of(s, d, count);
+++ __ align(CodeEntryAlignment);
+++ StubCodeMark mark(this, "StubRoutines", name);
+++ address start = __ pc();
+++ __ enter();
+++
+++ if (entry != NULL) {
+++ *entry = __ pc();
+++ // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
+++ BLOCK_COMMENT("Entry:");
+++ }
+++
+++ DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
+++ if (dest_uninitialized) {
+++ decorators |= IS_DEST_UNINITIALIZED;
+++ }
+++ if (aligned) {
+++ decorators |= ARRAYCOPY_ALIGNED;
+++ }
+++
+++ BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
+++ bs->arraycopy_prologue(_masm, decorators, is_oop, s, d, count, saved_reg);
+++
+++ if (is_oop) {
+++ // save regs before copy_memory
+++ __ push_reg(RegSet::of(d, count), sp);
+++ }
+++
+++ {
+++ // UnsafeCopyMemory page error: continue after ucm
+++ bool add_entry = !is_oop && (!aligned || sizeof(jlong) == size);
+++ UnsafeCopyMemoryMark ucmm(this, add_entry, true);
+++ copy_memory(aligned, s, d, count, t0, size);
+++ }
+++
+++ if (is_oop) {
+++ __ pop_reg(RegSet::of(d, count), sp);
+++ if (VerifyOops) {
+++ verify_oop_array(size, d, count, t2);
+++ }
+++ }
+++
+++ bs->arraycopy_epilogue(_masm, decorators, is_oop, d, count, t0, RegSet());
+++
+++ __ leave();
+++ __ mv(x10, zr); // return 0
+++ __ ret();
+++ return start;
+++ }
+++
+++ // Arguments:
+++ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
+++ // ignored
+++ // is_oop - true => oop array, so generate store check code
+++ // name - stub name string
+++ //
+++ // Inputs:
+++ // c_rarg0 - source array address
+++ // c_rarg1 - destination array address
+++ // c_rarg2 - element count, treated as ssize_t, can be zero
+++ //
+++ // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
+++ // the hardware handle it. The two dwords within qwords that span
+++ // cache line boundaries will still be loaded and stored atomically.
+++ //
+++ address generate_conjoint_copy(size_t size, bool aligned, bool is_oop, address nooverlap_target,
+++ address* entry, const char* name,
+++ bool dest_uninitialized = false) {
+++ const Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
+++ RegSet saved_regs = RegSet::of(s, d, count);
+++ StubCodeMark mark(this, "StubRoutines", name);
+++ address start = __ pc();
+++ __ enter();
+++
+++ if (entry != NULL) {
+++ *entry = __ pc();
+++ // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
+++ BLOCK_COMMENT("Entry:");
+++ }
+++
+++ // use fwd copy when (d-s) above_equal (count*size)
+++ __ sub(t0, d, s);
+++ __ slli(t1, count, exact_log2(size));
+++ __ bgeu(t0, t1, nooverlap_target);
+++
+++ DecoratorSet decorators = IN_HEAP | IS_ARRAY;
+++ if (dest_uninitialized) {
+++ decorators |= IS_DEST_UNINITIALIZED;
+++ }
+++ if (aligned) {
+++ decorators |= ARRAYCOPY_ALIGNED;
+++ }
+++
+++ BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
+++ bs->arraycopy_prologue(_masm, decorators, is_oop, s, d, count, saved_regs);
+++
+++ if (is_oop) {
+++ // save regs before copy_memory
+++ __ push_reg(RegSet::of(d, count), sp);
+++ }
+++
+++ {
+++ // UnsafeCopyMemory page error: continue after ucm
+++ bool add_entry = !is_oop && (!aligned || sizeof(jlong) == size);
+++ UnsafeCopyMemoryMark ucmm(this, add_entry, true);
+++ copy_memory(aligned, s, d, count, t0, -size);
+++ }
+++
+++ if (is_oop) {
+++ __ pop_reg(RegSet::of(d, count), sp);
+++ if (VerifyOops) {
+++ verify_oop_array(size, d, count, t2);
+++ }
+++ }
+++ bs->arraycopy_epilogue(_masm, decorators, is_oop, d, count, t0, RegSet());
+++ __ leave();
+++ __ mv(x10, zr); // return 0
+++ __ ret();
+++ return start;
+++ }
+++
+++ // Arguments:
+++ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
+++ // ignored
+++ // name - stub name string
+++ //
+++ // Inputs:
+++ // c_rarg0 - source array address
+++ // c_rarg1 - destination array address
+++ // c_rarg2 - element count, treated as ssize_t, can be zero
+++ //
+++ // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
+++ // we let the hardware handle it. The one to eight bytes within words,
+++ // dwords or qwords that span cache line boundaries will still be loaded
+++ // and stored atomically.
+++ //
+++ // Side Effects:
+++ // disjoint_byte_copy_entry is set to the no-overlap entry point
+++ // used by generate_conjoint_byte_copy().
+++ //
+++ address generate_disjoint_byte_copy(bool aligned, address* entry, const char* name) {
+++ const bool not_oop = false;
+++ return generate_disjoint_copy(sizeof (jbyte), aligned, not_oop, entry, name);
+++ }
+++
+++ // Arguments:
+++ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
+++ // ignored
+++ // name - stub name string
+++ //
+++ // Inputs:
+++ // c_rarg0 - source array address
+++ // c_rarg1 - destination array address
+++ // c_rarg2 - element count, treated as ssize_t, can be zero
+++ //
+++ // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
+++ // we let the hardware handle it.
The one to eight bytes within words, +++ // dwords or qwords that span cache line boundaries will still be loaded +++ // and stored atomically. +++ // +++ address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, +++ address* entry, const char* name) { +++ const bool not_oop = false; +++ return generate_conjoint_copy(sizeof (jbyte), aligned, not_oop, nooverlap_target, entry, name); +++ } +++ +++ // Arguments: +++ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary +++ // ignored +++ // name - stub name string +++ // +++ // Inputs: +++ // c_rarg0 - source array address +++ // c_rarg1 - destination array address +++ // c_rarg2 - element count, treated as ssize_t, can be zero +++ // +++ // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we +++ // let the hardware handle it. The two or four words within dwords +++ // or qwords that span cache line boundaries will still be loaded +++ // and stored atomically. +++ // +++ // Side Effects: +++ // disjoint_short_copy_entry is set to the no-overlap entry point +++ // used by generate_conjoint_short_copy(). +++ // +++ address generate_disjoint_short_copy(bool aligned, +++ address* entry, const char* name) { +++ const bool not_oop = false; +++ return generate_disjoint_copy(sizeof (jshort), aligned, not_oop, entry, name); +++ } +++ +++ // Arguments: +++ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary +++ // ignored +++ // name - stub name string +++ // +++ // Inputs: +++ // c_rarg0 - source array address +++ // c_rarg1 - destination array address +++ // c_rarg2 - element count, treated as ssize_t, can be zero +++ // +++ // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we +++ // let the hardware handle it. The two or four words within dwords +++ // or qwords that span cache line boundaries will still be loaded +++ // and stored atomically. +++ // +++ address generate_conjoint_short_copy(bool aligned, address nooverlap_target, +++ address* entry, const char* name) { +++ const bool not_oop = false; +++ return generate_conjoint_copy(sizeof (jshort), aligned, not_oop, nooverlap_target, entry, name); +++ } +++ +++ // Arguments: +++ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary +++ // ignored +++ // name - stub name string +++ // +++ // Inputs: +++ // c_rarg0 - source array address +++ // c_rarg1 - destination array address +++ // c_rarg2 - element count, treated as ssize_t, can be zero +++ // +++ // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let +++ // the hardware handle it. The two dwords within qwords that span +++ // cache line boundaries will still be loaded and stored atomicly. +++ // +++ // Side Effects: +++ // disjoint_int_copy_entry is set to the no-overlap entry point +++ // used by generate_conjoint_int_oop_copy(). +++ // +++ address generate_disjoint_int_copy(bool aligned, address* entry, +++ const char* name, bool dest_uninitialized = false) { +++ const bool not_oop = false; +++ return generate_disjoint_copy(sizeof (jint), aligned, not_oop, entry, name); +++ } +++ +++ // Arguments: +++ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary +++ // ignored +++ // name - stub name string +++ // +++ // Inputs: +++ // c_rarg0 - source array address +++ // c_rarg1 - destination array address +++ // c_rarg2 - element count, treated as ssize_t, can be zero +++ // +++ // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let +++ // the hardware handle it. 
The two dwords within qwords that span +++ // cache line boundaries will still be loaded and stored atomicly. +++ // +++ address generate_conjoint_int_copy(bool aligned, address nooverlap_target, +++ address* entry, const char* name, +++ bool dest_uninitialized = false) { +++ const bool not_oop = false; +++ return generate_conjoint_copy(sizeof (jint), aligned, not_oop, nooverlap_target, entry, name); +++ } +++ +++ +++ // Arguments: +++ // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes +++ // ignored +++ // name - stub name string +++ // +++ // Inputs: +++ // c_rarg0 - source array address +++ // c_rarg1 - destination array address +++ // c_rarg2 - element count, treated as size_t, can be zero +++ // +++ // Side Effects: +++ // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the +++ // no-overlap entry point used by generate_conjoint_long_oop_copy(). +++ // +++ address generate_disjoint_long_copy(bool aligned, address* entry, +++ const char* name, bool dest_uninitialized = false) { +++ const bool not_oop = false; +++ return generate_disjoint_copy(sizeof (jlong), aligned, not_oop, entry, name); +++ } +++ +++ // Arguments: +++ // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes +++ // ignored +++ // name - stub name string +++ // +++ // Inputs: +++ // c_rarg0 - source array address +++ // c_rarg1 - destination array address +++ // c_rarg2 - element count, treated as size_t, can be zero +++ // +++ address generate_conjoint_long_copy(bool aligned, +++ address nooverlap_target, address* entry, +++ const char* name, bool dest_uninitialized = false) { +++ const bool not_oop = false; +++ return generate_conjoint_copy(sizeof (jlong), aligned, not_oop, nooverlap_target, entry, name); +++ } +++ +++ // Arguments: +++ // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes +++ // ignored +++ // name - stub name string +++ // +++ // Inputs: +++ // c_rarg0 - source array address +++ // c_rarg1 - destination array address +++ // c_rarg2 - element count, treated as size_t, can be zero +++ // +++ // Side Effects: +++ // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the +++ // no-overlap entry point used by generate_conjoint_long_oop_copy(). +++ // +++ address generate_disjoint_oop_copy(bool aligned, address* entry, +++ const char* name, bool dest_uninitialized) { +++ const bool is_oop = true; +++ const size_t size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); +++ return generate_disjoint_copy(size, aligned, is_oop, entry, name, dest_uninitialized); +++ } +++ +++ // Arguments: +++ // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes +++ // ignored +++ // name - stub name string +++ // +++ // Inputs: +++ // c_rarg0 - source array address +++ // c_rarg1 - destination array address +++ // c_rarg2 - element count, treated as size_t, can be zero +++ // +++ address generate_conjoint_oop_copy(bool aligned, +++ address nooverlap_target, address* entry, +++ const char* name, bool dest_uninitialized) { +++ const bool is_oop = true; +++ const size_t size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); +++ return generate_conjoint_copy(size, aligned, is_oop, nooverlap_target, entry, +++ name, dest_uninitialized); +++ } +++ +++ // Helper for generating a dynamic type check. +++ // Smashes t0, t1. 
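+++ //
+++ // Roughly (an illustrative sketch, not the emitted instructions), the
+++ // generated check behaves like:
+++ //
+++ //   if (*(Klass**)((address)sub_klass + super_check_offset) == super_klass)
+++ //     goto L_success;            // fast path: cached/primary super hit
+++ //   // slow path: scan sub_klass's secondary supers; fall through on miss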
+++ void generate_type_check(Register sub_klass,
+++                          Register super_check_offset,
+++                          Register super_klass,
+++                          Label& L_success) {
+++   assert_different_registers(sub_klass, super_check_offset, super_klass);
+++
+++   BLOCK_COMMENT("type_check:");
+++
+++   Label L_miss;
+++
+++   __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, super_check_offset);
+++   __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);
+++
+++   // Fall through on failure!
+++   __ BIND(L_miss);
+++ }
+++
+++ //
+++ // Generate checkcasting array copy stub
+++ //
+++ // Input:
+++ //   c_rarg0 - source array address
+++ //   c_rarg1 - destination array address
+++ //   c_rarg2 - element count, treated as ssize_t, can be zero
+++ //   c_rarg3 - size_t ckoff (super_check_offset)
+++ //   c_rarg4 - oop ckval (super_klass)
+++ //
+++ // Output:
+++ //   x10 ==  0  - success
+++ //   x10 == -1^K - failure, where K is partial transfer count
+++ //
+++ address generate_checkcast_copy(const char* name, address* entry,
+++                                 bool dest_uninitialized = false) {
+++   Label L_load_element, L_store_element, L_do_card_marks, L_done, L_done_pop;
+++
+++   // Input registers (after setup_arg_regs)
+++   const Register from = c_rarg0;   // source array address
+++   const Register to = c_rarg1;     // destination array address
+++   const Register count = c_rarg2;  // elements count
+++   const Register ckoff = c_rarg3;  // super_check_offset
+++   const Register ckval = c_rarg4;  // super_klass
+++
+++   RegSet wb_pre_saved_regs = RegSet::range(c_rarg0, c_rarg4);
+++   RegSet wb_post_saved_regs = RegSet::of(count);
+++
+++   // Registers used as temps (x7, x9, x18 are save-on-entry)
+++   const Register count_save = x19;  // orig elements count
+++   const Register start_to = x18;    // destination array start address
+++   const Register copied_oop = x7;   // actual oop copied
+++   const Register r9_klass = x9;     // oop._klass
+++
+++   //---------------------------------------------------------------
+++   // Assembler stub will be used for this call to arraycopy
+++   // if the two arrays are subtypes of Object[] but the
+++   // destination array type is not equal to or a supertype
+++   // of the source type. Each element must be separately
+++   // checked.
+++
+++   assert_different_registers(from, to, count, ckoff, ckval, start_to,
+++                              copied_oop, r9_klass, count_save);
+++
+++   __ align(CodeEntryAlignment);
+++   StubCodeMark mark(this, "StubRoutines", name);
+++   address start = __ pc();
+++
+++   __ enter(); // required for proper stackwalking of RuntimeStub frame
+++
+++   // Caller of this entry point must set up the argument registers.
+++   if (entry != NULL) {
+++     *entry = __ pc();
+++     BLOCK_COMMENT("Entry:");
+++   }
+++
+++   // Empty array: Nothing to do
+++   __ beqz(count, L_done);
+++
+++   __ push_reg(RegSet::of(x7, x9, x18, x19), sp);
+++
+++#ifdef ASSERT
+++   BLOCK_COMMENT("assert consistent ckoff/ckval");
+++   // The ckoff and ckval must be mutually consistent,
+++   // even though caller generates both.
+++   { Label L;
+++     int sco_offset = in_bytes(Klass::super_check_offset_offset());
+++     __ lwu(start_to, Address(ckval, sco_offset));
+++     __ beq(ckoff, start_to, L);
+++     __ stop("super_check_offset inconsistent");
+++     __ bind(L);
+++   }
+++#endif //ASSERT
+++
+++   DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT;
+++   bool is_oop = true;
+++   if (dest_uninitialized) {
+++     decorators |= IS_DEST_UNINITIALIZED;
+++   }
+++
+++   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
+++   bs->arraycopy_prologue(_masm, decorators, is_oop, from, to, count, wb_pre_saved_regs);
+++
+++   // save the original count
+++   __ mv(count_save, count);
+++
+++   // Copy from low to high addresses
+++   __ mv(start_to, to);  // Save destination array start address
+++   __ j(L_load_element);
+++
+++   // ======== begin loop ========
+++   // (Loop is rotated; its entry is L_load_element.)
+++   // Loop control:
+++   //   for count to 0 do
+++   //     copied_oop = load_heap_oop(from++)
+++   //     ... generate_type_check ...
+++   //     store_heap_oop(to++, copied_oop)
+++   //   end
+++
+++   __ align(OptoLoopAlignment);
+++
+++   __ BIND(L_store_element);
+++   __ store_heap_oop(Address(to, 0), copied_oop, noreg, noreg, AS_RAW);  // store the oop
+++   __ add(to, to, UseCompressedOops ? 4 : 8);
+++   __ sub(count, count, 1);
+++   __ beqz(count, L_do_card_marks);
+++
+++   // ======== loop entry is here ========
+++   __ BIND(L_load_element);
+++   __ load_heap_oop(copied_oop, Address(from, 0), noreg, noreg, AS_RAW);  // load the oop
+++   __ add(from, from, UseCompressedOops ? 4 : 8);
+++   __ beqz(copied_oop, L_store_element);
+++
+++   __ load_klass(r9_klass, copied_oop);  // query the object klass
+++   generate_type_check(r9_klass, ckoff, ckval, L_store_element);
+++   // ======== end loop ========
+++
+++   // It was a real error; we must depend on the caller to finish the job.
+++   // Register count = remaining oops, count_save = total oops.
+++   // Emit GC store barriers for the oops we have copied and report
+++   // their number to the caller.
+++
+++   __ sub(count, count_save, count);  // K = partially copied oop count
+++   __ xori(count, count, -1);         // report (-1^K) to caller
+++   __ beqz(count, L_done_pop);
+++
+++   __ BIND(L_do_card_marks);
+++   bs->arraycopy_epilogue(_masm, decorators, is_oop, start_to, count_save, t0, wb_post_saved_regs);
+++
+++   __ bind(L_done_pop);
+++   __ pop_reg(RegSet::of(x7, x9, x18, x19), sp);
+++   inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
+++
+++   __ bind(L_done);
+++   __ mv(x10, count);
+++   __ leave();
+++   __ ret();
+++
+++   return start;
+++ }
+++
+++ // Perform range checks on the proposed arraycopy.
+++ // Kills temp, but nothing else.
+++ // Also, clean the sign bits of src_pos and dst_pos.
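+++ //
+++ // In Java-like pseudocode (illustrative only) the checks below amount to:
+++ //
+++ //   if ((uint32_t)(src_pos + length) > (uint32_t)src.length) goto L_failed;
+++ //   if ((uint32_t)(dst_pos + length) > (uint32_t)dst.length) goto L_failed;
+++ //
+++ // The unsigned compares also reject sums that wrapped negative.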
+++ void arraycopy_range_checks(Register src,      // source array oop (c_rarg0)
+++                             Register src_pos,  // source position (c_rarg1)
+++                             Register dst,      // destination array oop (c_rarg2)
+++                             Register dst_pos,  // destination position (c_rarg3)
+++                             Register length,
+++                             Register temp,
+++                             Label& L_failed) {
+++   BLOCK_COMMENT("arraycopy_range_checks:");
+++
+++   assert_different_registers(t0, temp);
+++
+++   // if [src_pos + length > arrayOop(src)->length()] then FAIL
+++   __ lwu(t0, Address(src, arrayOopDesc::length_offset_in_bytes()));
+++   __ addw(temp, length, src_pos);
+++   __ bgtu(temp, t0, L_failed);
+++
+++   // if [dst_pos + length > arrayOop(dst)->length()] then FAIL
+++   __ lwu(t0, Address(dst, arrayOopDesc::length_offset_in_bytes()));
+++   __ addw(temp, length, dst_pos);
+++   __ bgtu(temp, t0, L_failed);
+++
+++   // Have to clean up high 32 bits of 'src_pos' and 'dst_pos'.
+++   __ zero_extend(src_pos, src_pos, 32);
+++   __ zero_extend(dst_pos, dst_pos, 32);
+++
+++   BLOCK_COMMENT("arraycopy_range_checks done");
+++ }
+++
+++ //
+++ // Generate 'unsafe' array copy stub
+++ // Though just as safe as the other stubs, it takes an unscaled
+++ // size_t argument instead of an element count.
+++ //
+++ // Input:
+++ //   c_rarg0 - source array address
+++ //   c_rarg1 - destination array address
+++ //   c_rarg2 - byte count, treated as ssize_t, can be zero
+++ //
+++ // Examines the alignment of the operands and dispatches
+++ // to a long, int, short, or byte copy loop.
+++ //
+++ address generate_unsafe_copy(const char* name,
+++                              address byte_copy_entry,
+++                              address short_copy_entry,
+++                              address int_copy_entry,
+++                              address long_copy_entry) {
+++   assert_cond(byte_copy_entry != NULL && short_copy_entry != NULL &&
+++               int_copy_entry != NULL && long_copy_entry != NULL);
+++   Label L_long_aligned, L_int_aligned, L_short_aligned;
+++   const Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
+++
+++   __ align(CodeEntryAlignment);
+++   StubCodeMark mark(this, "StubRoutines", name);
+++   address start = __ pc();
+++   __ enter(); // required for proper stackwalking of RuntimeStub frame
+++
+++   // bump this on entry, not on exit:
+++   inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
+++
+++   __ orr(t0, s, d);
+++   __ orr(t0, t0, count);
+++
+++   __ andi(t0, t0, BytesPerLong - 1);
+++   __ beqz(t0, L_long_aligned);
+++   __ andi(t0, t0, BytesPerInt - 1);
+++   __ beqz(t0, L_int_aligned);
+++   __ test_bit(t0, t0, 0);
+++   __ beqz(t0, L_short_aligned);
+++   __ j(RuntimeAddress(byte_copy_entry));
+++
+++   __ BIND(L_short_aligned);
+++   __ srli(count, count, LogBytesPerShort);  // size => short_count
+++   __ j(RuntimeAddress(short_copy_entry));
+++   __ BIND(L_int_aligned);
+++   __ srli(count, count, LogBytesPerInt);    // size => int_count
+++   __ j(RuntimeAddress(int_copy_entry));
+++   __ BIND(L_long_aligned);
+++   __ srli(count, count, LogBytesPerLong);   // size => long_count
+++   __ j(RuntimeAddress(long_copy_entry));
+++
+++   return start;
+++ }
+++
+++ //
+++ // Generate generic array copy stubs
+++ //
+++ // Input:
+++ //   c_rarg0 - src oop
+++ //   c_rarg1 - src_pos (32-bits)
+++ //   c_rarg2 - dst oop
+++ //   c_rarg3 - dst_pos (32-bits)
+++ //   c_rarg4 - element count (32-bits)
+++ //
+++ // Output:
+++ //   x10 ==  0  - success
+++ //   x10 == -1^K - failure, where K is partial transfer count
+++ //
+++ address generate_generic_copy(const char* name,
+++                               address byte_copy_entry, address short_copy_entry,
+++                               address int_copy_entry, address oop_copy_entry,
+++                               address long_copy_entry, address checkcast_copy_entry) {
+++   assert_cond(byte_copy_entry != NULL && short_copy_entry != NULL &&
+++               int_copy_entry != NULL && oop_copy_entry != NULL &&
+++               long_copy_entry != NULL && checkcast_copy_entry != NULL);
+++   Label L_failed, L_failed_0, L_objArray;
+++   Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;
+++
+++   // Input registers
+++   const Register src = c_rarg0;      // source array oop
+++   const Register src_pos = c_rarg1;  // source position
+++   const Register dst = c_rarg2;      // destination array oop
+++   const Register dst_pos = c_rarg3;  // destination position
+++   const Register length = c_rarg4;
+++
+++   // Registers used as temps
+++   const Register dst_klass = c_rarg5;
+++
+++   __ align(CodeEntryAlignment);
+++
+++   StubCodeMark mark(this, "StubRoutines", name);
+++
+++   address start = __ pc();
+++
+++   __ enter(); // required for proper stackwalking of RuntimeStub frame
+++
+++   // bump this on entry, not on exit:
+++   inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
+++
+++   //-----------------------------------------------------------------------
+++   // Assembler stub will be used for this call to arraycopy
+++   // if the following conditions are met:
+++   //
+++   // (1) src and dst must not be null.
+++   // (2) src_pos must not be negative.
+++   // (3) dst_pos must not be negative.
+++   // (4) length  must not be negative.
+++   // (5) src klass and dst klass should be the same and not NULL.
+++   // (6) src and dst should be arrays.
+++   // (7) src_pos + length must not exceed length of src.
+++   // (8) dst_pos + length must not exceed length of dst.
+++   //
+++
+++   // if [src == NULL] then return -1
+++   __ beqz(src, L_failed);
+++
+++   // if [src_pos < 0] then return -1
+++   __ sign_extend(t0, src_pos, 32);
+++   __ bltz(t0, L_failed);
+++
+++   // if [dst == NULL] then return -1
+++   __ beqz(dst, L_failed);
+++
+++   // if [dst_pos < 0] then return -1
+++   __ sign_extend(t0, dst_pos, 32);
+++   __ bltz(t0, L_failed);
+++
+++   // registers used as temp
+++   const Register scratch_length = x28;     // elements count to copy
+++   const Register scratch_src_klass = x29;  // array klass
+++   const Register lh = x30;                 // layout helper
+++
+++   // if [length < 0] then return -1
+++   __ sign_extend(scratch_length, length, 32);  // length (elements count, 32-bits value)
+++   __ bltz(scratch_length, L_failed);
+++
+++   __ load_klass(scratch_src_klass, src);
+++#ifdef ASSERT
+++   {
+++     BLOCK_COMMENT("assert klasses not null {");
+++     Label L1, L2;
+++     __ bnez(scratch_src_klass, L2);  // it is broken if klass is NULL
+++     __ bind(L1);
+++     __ stop("broken null klass");
+++     __ bind(L2);
+++     __ load_klass(t0, dst, t1);
+++     __ beqz(t0, L1);  // this would be broken also
+++     BLOCK_COMMENT("} assert klasses not null done");
+++   }
+++#endif
+++
+++   // Load layout helper (32-bits)
+++   //
+++   //  |array_tag|     | header_size | element_type |     |log2_element_size|
+++   // 32        30    24            16              8     2                 0
+++   //
+++   //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
+++   //
+++
+++   const int lh_offset = in_bytes(Klass::layout_helper_offset());
+++
+++   // Handle objArrays completely differently...
+++   const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
+++   __ lw(lh, Address(scratch_src_klass, lh_offset));
+++   __ mv(t0, objArray_lh);
+++   __ beq(lh, t0, L_objArray);
+++
+++   // if [src->klass() != dst->klass()] then return -1
+++   __ load_klass(t1, dst);
+++   __ bne(t1, scratch_src_klass, L_failed);
+++
+++   // if [!src->is_Array()] then return -1
+++   // i.e. (lh >= 0)
+++   __ bgez(lh, L_failed);
+++
+++   // At this point, it is known to be a typeArray (array_tag 0x3).
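+++   // (For reference: Klass::array_layout_helper() composes the value as
+++   //    (tag << 30) | (header_size_in_bytes << 16) | (etype << 8) | log2_esize,
+++   //  so either array tag (0x2 or 0x3) sets the sign bit and makes lh
+++   //  negative as a jint -- which is exactly what the bgez test above uses.)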
+++#ifdef ASSERT
+++   {
+++     BLOCK_COMMENT("assert primitive array {");
+++     Label L;
+++     __ mv(t1, (int32_t)(Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
+++     __ bge(lh, t1, L);
+++     __ stop("must be a primitive array");
+++     __ bind(L);
+++     BLOCK_COMMENT("} assert primitive array done");
+++   }
+++#endif
+++
+++   arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
+++                          t1, L_failed);
+++
+++   // TypeArrayKlass
+++   //
+++   // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize)
+++   // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize)
+++   //
+++
+++   const Register t0_offset = t0;    // array offset
+++   const Register x30_elsize = lh;   // element size
+++
+++   // Get array_header_in_bytes()
+++   int lh_header_size_width = exact_log2(Klass::_lh_header_size_mask + 1);
+++   int lh_header_size_msb = Klass::_lh_header_size_shift + lh_header_size_width;
+++   __ slli(t0_offset, lh, XLEN - lh_header_size_msb);           // left shift to drop bits 24 and above
+++   __ srli(t0_offset, t0_offset, XLEN - lh_header_size_width);  // array_offset
+++
+++   __ add(src, src, t0_offset);  // src array offset
+++   __ add(dst, dst, t0_offset);  // dst array offset
+++   BLOCK_COMMENT("choose copy loop based on element size");
+++
+++   // next registers should be set before the jump to corresponding stub
+++   const Register from = c_rarg0;   // source array address
+++   const Register to = c_rarg1;     // destination array address
+++   const Register count = c_rarg2;  // elements count
+++
+++   // 'from', 'to', 'count' registers should be set in such order
+++   // since they are the same as 'src', 'src_pos', 'dst'.
+++
+++   assert(Klass::_lh_log2_element_size_shift == 0, "fix this code");
+++
+++   // The possible values of elsize are 0-3, i.e. exact_log2(element
+++   // size in bytes). We do a simple bitwise binary search.
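+++   //
+++   // Equivalently (a sketch, not the emitted code):
+++   //
+++   //   switch (elsize) {              //  bit 1  bit 0
+++   //     case 0: goto L_copy_bytes;   //    0      0
+++   //     case 1: goto L_copy_shorts;  //    0      1
+++   //     case 2: goto L_copy_ints;    //    1      0
+++   //     case 3: goto L_copy_longs;   //    1      1
+++   //   }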
+++ __ BIND(L_copy_bytes); +++ __ test_bit(t0, x30_elsize, 1); +++ __ bnez(t0, L_copy_ints); +++ __ test_bit(t0, x30_elsize, 0); +++ __ bnez(t0, L_copy_shorts); +++ __ add(from, src, src_pos); // src_addr +++ __ add(to, dst, dst_pos); // dst_addr +++ __ sign_extend(count, scratch_length, 32); // length +++ __ j(RuntimeAddress(byte_copy_entry)); +++ +++ __ BIND(L_copy_shorts); +++ __ shadd(from, src_pos, src, t0, 1); // src_addr +++ __ shadd(to, dst_pos, dst, t0, 1); // dst_addr +++ __ sign_extend(count, scratch_length, 32); // length +++ __ j(RuntimeAddress(short_copy_entry)); +++ +++ __ BIND(L_copy_ints); +++ __ test_bit(t0, x30_elsize, 0); +++ __ bnez(t0, L_copy_longs); +++ __ shadd(from, src_pos, src, t0, 2); // src_addr +++ __ shadd(to, dst_pos, dst, t0, 2); // dst_addr +++ __ sign_extend(count, scratch_length, 32); // length +++ __ j(RuntimeAddress(int_copy_entry)); +++ +++ __ BIND(L_copy_longs); +++#ifdef ASSERT +++ { +++ BLOCK_COMMENT("assert long copy {"); +++ Label L; +++ __ andi(lh, lh, Klass::_lh_log2_element_size_mask); // lh -> x30_elsize +++ __ sign_extend(lh, lh, 32); +++ __ mv(t0, LogBytesPerLong); +++ __ beq(x30_elsize, t0, L); +++ __ stop("must be long copy, but elsize is wrong"); +++ __ bind(L); +++ BLOCK_COMMENT("} assert long copy done"); +++ } +++#endif +++ __ shadd(from, src_pos, src, t0, 3); // src_addr +++ __ shadd(to, dst_pos, dst, t0, 3); // dst_addr +++ __ sign_extend(count, scratch_length, 32); // length +++ __ j(RuntimeAddress(long_copy_entry)); +++ +++ // ObjArrayKlass +++ __ BIND(L_objArray); +++ // live at this point: scratch_src_klass, scratch_length, src[_pos], dst[_pos] +++ +++ Label L_plain_copy, L_checkcast_copy; +++ // test array classes for subtyping +++ __ load_klass(t2, dst); +++ __ bne(scratch_src_klass, t2, L_checkcast_copy); // usual case is exact equality +++ +++ // Identically typed arrays can be copied without element-wise checks. +++ arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length, +++ t1, L_failed); +++ +++ __ shadd(from, src_pos, src, t0, LogBytesPerHeapOop); +++ __ add(from, from, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); +++ __ shadd(to, dst_pos, dst, t0, LogBytesPerHeapOop); +++ __ add(to, to, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); +++ __ sign_extend(count, scratch_length, 32); // length +++ __ BIND(L_plain_copy); +++ __ j(RuntimeAddress(oop_copy_entry)); +++ +++ __ BIND(L_checkcast_copy); +++ // live at this point: scratch_src_klass, scratch_length, t2 (dst_klass) +++ { +++ // Before looking at dst.length, make sure dst is also an objArray. +++ __ lwu(t0, Address(t2, lh_offset)); +++ __ mv(t1, objArray_lh); +++ __ bne(t0, t1, L_failed); +++ +++ // It is safe to examine both src.length and dst.length. +++ arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length, +++ t2, L_failed); +++ +++ __ load_klass(dst_klass, dst); // reload +++ +++ // Marshal the base address arguments now, freeing registers. +++ __ shadd(from, src_pos, src, t0, LogBytesPerHeapOop); +++ __ add(from, from, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); +++ __ shadd(to, dst_pos, dst, t0, LogBytesPerHeapOop); +++ __ add(to, to, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); +++ __ sign_extend(count, length, 32); // length (reloaded) +++ const Register sco_temp = c_rarg3; // this register is free now +++ assert_different_registers(from, to, count, sco_temp, +++ dst_klass, scratch_src_klass); +++ +++ // Generate the type check. 
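+++     // (If the type check succeeds, control transfers to L_plain_copy and
+++     // the elements are copied without per-element checks; otherwise we
+++     // fall through and set up the element-wise checkcast copy below.)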
+++     const int sco_offset = in_bytes(Klass::super_check_offset_offset());
+++     __ lwu(sco_temp, Address(dst_klass, sco_offset));
+++
+++     // Smashes t0, t1
+++     generate_type_check(scratch_src_klass, sco_temp, dst_klass, L_plain_copy);
+++
+++     // Fetch destination element klass from the ObjArrayKlass header.
+++     int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
+++     __ ld(dst_klass, Address(dst_klass, ek_offset));
+++     __ lwu(sco_temp, Address(dst_klass, sco_offset));
+++
+++     // the checkcast_copy loop needs two extra arguments:
+++     assert(c_rarg3 == sco_temp, "#3 already in place");
+++     // Set up arguments for checkcast_copy_entry.
+++     __ mv(c_rarg4, dst_klass);  // dst.klass.element_klass
+++     __ j(RuntimeAddress(checkcast_copy_entry));
+++   }
+++
+++   __ BIND(L_failed);
+++   __ mv(x10, -1);
+++   __ leave();  // required for proper stackwalking of RuntimeStub frame
+++   __ ret();
+++
+++   return start;
+++ }
+++
+++ //
+++ // Generate stub for array fill. If "aligned" is true, the
+++ // "to" address is assumed to be heapword aligned.
+++ //
+++ // Arguments for generated stub:
+++ //   to:    c_rarg0
+++ //   value: c_rarg1
+++ //   count: c_rarg2 treated as signed
+++ //
+++ address generate_fill(BasicType t, bool aligned, const char* name) {
+++   __ align(CodeEntryAlignment);
+++   StubCodeMark mark(this, "StubRoutines", name);
+++   address start = __ pc();
+++
+++   BLOCK_COMMENT("Entry:");
+++
+++   const Register to = c_rarg0;     // destination array address
+++   const Register value = c_rarg1;  // value
+++   const Register count = c_rarg2;  // elements count
+++
+++   const Register bz_base = x28;    // base for block_zero routine
+++   const Register cnt_words = x29;  // temp register
+++   const Register tmp_reg = t1;
+++
+++   __ enter();
+++
+++   Label L_fill_elements, L_exit1;
+++
+++   int shift = -1;
+++   switch (t) {
+++     case T_BYTE:
+++       shift = 0;
+++
+++       // Zero extend value
+++       // 8 bit -> 16 bit
+++       __ andi(value, value, 0xff);
+++       __ mv(tmp_reg, value);
+++       __ slli(tmp_reg, tmp_reg, 8);
+++       __ orr(value, value, tmp_reg);
+++
+++       // 16 bit -> 32 bit
+++       __ mv(tmp_reg, value);
+++       __ slli(tmp_reg, tmp_reg, 16);
+++       __ orr(value, value, tmp_reg);
+++
+++       __ mv(tmp_reg, 8 >> shift);  // Short arrays (< 8 bytes) fill by element
+++       __ bltu(count, tmp_reg, L_fill_elements);
+++       break;
+++     case T_SHORT:
+++       shift = 1;
+++       // Zero extend value
+++       // 16 bit -> 32 bit
+++       __ andi(value, value, 0xffff);
+++       __ mv(tmp_reg, value);
+++       __ slli(tmp_reg, tmp_reg, 16);
+++       __ orr(value, value, tmp_reg);
+++
+++       // Short arrays (< 8 bytes) fill by element
+++       __ mv(tmp_reg, 8 >> shift);
+++       __ bltu(count, tmp_reg, L_fill_elements);
+++       break;
+++     case T_INT:
+++       shift = 2;
+++
+++       // Short arrays (< 8 bytes) fill by element
+++       __ mv(tmp_reg, 8 >> shift);
+++       __ bltu(count, tmp_reg, L_fill_elements);
+++       break;
+++     default: ShouldNotReachHere();
+++   }
+++
+++   // Align destination address at an 8-byte boundary.
+++   Label L_skip_align1, L_skip_align2, L_skip_align4;
+++   if (!aligned) {
+++     switch (t) {
+++       case T_BYTE:
+++         // One byte misalignment happens only for byte arrays.
+++         __ test_bit(t0, to, 0);
+++         __ beqz(t0, L_skip_align1);
+++         __ sb(value, Address(to, 0));
+++         __ addi(to, to, 1);
+++         __ addiw(count, count, -1);
+++         __ bind(L_skip_align1);
+++         // Fallthrough
+++       case T_SHORT:
+++         // Two bytes misalignment happens only for byte and short (char) arrays.
+++ __ test_bit(t0, to, 1); +++ __ beqz(t0, L_skip_align2); +++ __ sh(value, Address(to, 0)); +++ __ addi(to, to, 2); +++ __ addiw(count, count, -(2 >> shift)); +++ __ bind(L_skip_align2); +++ // Fallthrough +++ case T_INT: +++ // Align to 8 bytes, we know we are 4 byte aligned to start. +++ __ test_bit(t0, to, 2); +++ __ beqz(t0, L_skip_align4); +++ __ sw(value, Address(to, 0)); +++ __ addi(to, to, 4); +++ __ addiw(count, count, -(4 >> shift)); +++ __ bind(L_skip_align4); +++ break; +++ default: ShouldNotReachHere(); +++ } +++ } +++ +++ // +++ // Fill large chunks +++ // +++ __ srliw(cnt_words, count, 3 - shift); // number of words +++ +++ // 32 bit -> 64 bit +++ __ andi(value, value, 0xffffffff); +++ __ mv(tmp_reg, value); +++ __ slli(tmp_reg, tmp_reg, 32); +++ __ orr(value, value, tmp_reg); +++ +++ __ slli(tmp_reg, cnt_words, 3 - shift); +++ __ subw(count, count, tmp_reg); +++ { +++ __ fill_words(to, cnt_words, value); +++ } +++ +++ // Remaining count is less than 8 bytes. Fill it by a single store. +++ // Note that the total length is no less than 8 bytes. +++ if (t == T_BYTE || t == T_SHORT) { +++ __ beqz(count, L_exit1); +++ __ shadd(to, count, to, tmp_reg, shift); // points to the end +++ __ sd(value, Address(to, -8)); // overwrite some elements +++ __ bind(L_exit1); +++ __ leave(); +++ __ ret(); +++ } +++ +++ // Handle copies less than 8 bytes. +++ Label L_fill_2, L_fill_4, L_exit2; +++ __ bind(L_fill_elements); +++ switch (t) { +++ case T_BYTE: +++ __ test_bit(t0, count, 0); +++ __ beqz(t0, L_fill_2); +++ __ sb(value, Address(to, 0)); +++ __ addi(to, to, 1); +++ __ bind(L_fill_2); +++ __ test_bit(t0, count, 1); +++ __ beqz(t0, L_fill_4); +++ __ sh(value, Address(to, 0)); +++ __ addi(to, to, 2); +++ __ bind(L_fill_4); +++ __ test_bit(t0, count, 2); +++ __ beqz(t0, L_exit2); +++ __ sw(value, Address(to, 0)); +++ break; +++ case T_SHORT: +++ __ test_bit(t0, count, 0); +++ __ beqz(t0, L_fill_4); +++ __ sh(value, Address(to, 0)); +++ __ addi(to, to, 2); +++ __ bind(L_fill_4); +++ __ test_bit(t0, count, 1); +++ __ beqz(t0, L_exit2); +++ __ sw(value, Address(to, 0)); +++ break; +++ case T_INT: +++ __ beqz(count, L_exit2); +++ __ sw(value, Address(to, 0)); +++ break; +++ default: ShouldNotReachHere(); +++ } +++ __ bind(L_exit2); +++ __ leave(); +++ __ ret(); +++ return start; +++ } +++ +++ void generate_arraycopy_stubs() { +++ address entry = NULL; +++ address entry_jbyte_arraycopy = NULL; +++ address entry_jshort_arraycopy = NULL; +++ address entry_jint_arraycopy = NULL; +++ address entry_oop_arraycopy = NULL; +++ address entry_jlong_arraycopy = NULL; +++ address entry_checkcast_arraycopy = NULL; +++ +++ generate_copy_longs(copy_f, c_rarg0, c_rarg1, t1, copy_forwards); +++ generate_copy_longs(copy_b, c_rarg0, c_rarg1, t1, copy_backwards); +++ +++ StubRoutines::riscv::_zero_blocks = generate_zero_blocks(); +++ +++ //*** jbyte +++ // Always need aligned and unaligned versions +++ StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, +++ "jbyte_disjoint_arraycopy"); +++ StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, +++ &entry_jbyte_arraycopy, +++ "jbyte_arraycopy"); +++ StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry, +++ "arrayof_jbyte_disjoint_arraycopy"); +++ StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL, +++ "arrayof_jbyte_arraycopy"); +++ +++ //*** jshort +++ // Always need aligned and unaligned versions +++ 
StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, +++ "jshort_disjoint_arraycopy"); +++ StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, +++ &entry_jshort_arraycopy, +++ "jshort_arraycopy"); +++ StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry, +++ "arrayof_jshort_disjoint_arraycopy"); +++ StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL, +++ "arrayof_jshort_arraycopy"); +++ +++ //*** jint +++ // Aligned versions +++ StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry, +++ "arrayof_jint_disjoint_arraycopy"); +++ StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy, +++ "arrayof_jint_arraycopy"); +++ // In 64 bit we need both aligned and unaligned versions of jint arraycopy. +++ // entry_jint_arraycopy always points to the unaligned version +++ StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry, +++ "jint_disjoint_arraycopy"); +++ StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry, +++ &entry_jint_arraycopy, +++ "jint_arraycopy"); +++ +++ //*** jlong +++ // It is always aligned +++ StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry, +++ "arrayof_jlong_disjoint_arraycopy"); +++ StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy, +++ "arrayof_jlong_arraycopy"); +++ StubRoutines::_jlong_disjoint_arraycopy = StubRoutines::_arrayof_jlong_disjoint_arraycopy; +++ StubRoutines::_jlong_arraycopy = StubRoutines::_arrayof_jlong_arraycopy; +++ +++ //*** oops +++ { +++ // With compressed oops we need unaligned versions; notice that +++ // we overwrite entry_oop_arraycopy. 
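+++     // (With compressed oops each element is a 4-byte narrowOop, so even
+++     // the arrayof_ entries cannot assume 8-byte element alignment; hence
+++     // aligned == !UseCompressedOops below.)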
+++     bool aligned = !UseCompressedOops;
+++
+++     StubRoutines::_arrayof_oop_disjoint_arraycopy
+++       = generate_disjoint_oop_copy(aligned, &entry, "arrayof_oop_disjoint_arraycopy",
+++                                    /*dest_uninitialized*/false);
+++     StubRoutines::_arrayof_oop_arraycopy
+++       = generate_conjoint_oop_copy(aligned, entry, &entry_oop_arraycopy, "arrayof_oop_arraycopy",
+++                                    /*dest_uninitialized*/false);
+++     // Aligned versions without pre-barriers
+++     StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit
+++       = generate_disjoint_oop_copy(aligned, &entry, "arrayof_oop_disjoint_arraycopy_uninit",
+++                                    /*dest_uninitialized*/true);
+++     StubRoutines::_arrayof_oop_arraycopy_uninit
+++       = generate_conjoint_oop_copy(aligned, entry, NULL, "arrayof_oop_arraycopy_uninit",
+++                                    /*dest_uninitialized*/true);
+++   }
+++
+++   StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy;
+++   StubRoutines::_oop_arraycopy = StubRoutines::_arrayof_oop_arraycopy;
+++   StubRoutines::_oop_disjoint_arraycopy_uninit = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit;
+++   StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit;
+++
+++   StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
+++   StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
+++                                                                       /*dest_uninitialized*/true);
+++
+++
+++   StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
+++                                                          entry_jbyte_arraycopy,
+++                                                          entry_jshort_arraycopy,
+++                                                          entry_jint_arraycopy,
+++                                                          entry_jlong_arraycopy);
+++
+++   StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
+++                                                            entry_jbyte_arraycopy,
+++                                                            entry_jshort_arraycopy,
+++                                                            entry_jint_arraycopy,
+++                                                            entry_oop_arraycopy,
+++                                                            entry_jlong_arraycopy,
+++                                                            entry_checkcast_arraycopy);
+++
+++   StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
+++   StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
+++   StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
+++   StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
+++   StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
+++   StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
+++ }
+++
+++ // Safefetch stubs.
+++ void generate_safefetch(const char* name, int size, address* entry,
+++                         address* fault_pc, address* continuation_pc) {
+++   // safefetch signatures:
+++   //   int      SafeFetch32(int*      adr, int      errValue)
+++   //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue)
+++   //
+++   // arguments:
+++   //   c_rarg0 = adr
+++   //   c_rarg1 = errValue
+++   //
+++   // result:
+++   //   x10 = *adr or errValue
+++   assert_cond(entry != NULL && fault_pc != NULL && continuation_pc != NULL);
+++   StubCodeMark mark(this, "StubRoutines", name);
+++
+++   // Entry point, pc or function descriptor.
+++   *entry = __ pc();
+++
+++   // Load *adr into c_rarg1, may fault.
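+++   // If the load does fault, the platform signal handler is expected to
+++   // recognize fault_pc and resume execution at continuation_pc, where the
+++   // errValue still sitting in c_rarg1 becomes the return value.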
+++ *fault_pc = __ pc(); +++ switch (size) { +++ case 4: +++ // int32_t +++ __ lw(c_rarg1, Address(c_rarg0, 0)); +++ break; +++ case 8: +++ // int64_t +++ __ ld(c_rarg1, Address(c_rarg0, 0)); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ +++ // return errValue or *adr +++ *continuation_pc = __ pc(); +++ __ mv(x10, c_rarg1); +++ __ ret(); +++ } +++ +++ // code for comparing 16 bytes of strings with same encoding +++ void compare_string_16_bytes_same(Label &DIFF1, Label &DIFF2) { +++ const Register result = x10, str1 = x11, cnt1 = x12, str2 = x13, tmp1 = x28, tmp2 = x29, tmp4 = x7, tmp5 = x31; +++ __ ld(tmp5, Address(str1)); +++ __ addi(str1, str1, 8); +++ __ xorr(tmp4, tmp1, tmp2); +++ __ ld(cnt1, Address(str2)); +++ __ addi(str2, str2, 8); +++ __ bnez(tmp4, DIFF1); +++ __ ld(tmp1, Address(str1)); +++ __ addi(str1, str1, 8); +++ __ xorr(tmp4, tmp5, cnt1); +++ __ ld(tmp2, Address(str2)); +++ __ addi(str2, str2, 8); +++ __ bnez(tmp4, DIFF2); +++ } +++ +++ // code for comparing 8 characters of strings with Latin1 and Utf16 encoding +++ void compare_string_8_x_LU(Register tmpL, Register tmpU, Label &DIFF1, +++ Label &DIFF2) { +++ const Register strU = x12, curU = x7, strL = x29, tmp = x30; +++ __ ld(tmpL, Address(strL)); +++ __ addi(strL, strL, 8); +++ __ ld(tmpU, Address(strU)); +++ __ addi(strU, strU, 8); +++ __ inflate_lo32(tmp, tmpL); +++ __ mv(t0, tmp); +++ __ xorr(tmp, curU, t0); +++ __ bnez(tmp, DIFF2); +++ +++ __ ld(curU, Address(strU)); +++ __ addi(strU, strU, 8); +++ __ inflate_hi32(tmp, tmpL); +++ __ mv(t0, tmp); +++ __ xorr(tmp, tmpU, t0); +++ __ bnez(tmp, DIFF1); +++ } +++ +++ // x10 = result +++ // x11 = str1 +++ // x12 = cnt1 +++ // x13 = str2 +++ // x14 = cnt2 +++ // x28 = tmp1 +++ // x29 = tmp2 +++ // x30 = tmp3 +++ address generate_compare_long_string_different_encoding(bool isLU) { +++ __ align(CodeEntryAlignment); +++ StubCodeMark mark(this, "StubRoutines", isLU ? "compare_long_string_different_encoding LU" : "compare_long_string_different_encoding UL"); +++ address entry = __ pc(); +++ Label SMALL_LOOP, TAIL, TAIL_LOAD_16, LOAD_LAST, DIFF1, DIFF2, +++ DONE, CALCULATE_DIFFERENCE; +++ const Register result = x10, str1 = x11, cnt1 = x12, str2 = x13, cnt2 = x14, +++ tmp1 = x28, tmp2 = x29, tmp3 = x30, tmp4 = x7, tmp5 = x31; +++ RegSet spilled_regs = RegSet::of(tmp4, tmp5); +++ +++ // cnt2 == amount of characters left to compare +++ // Check already loaded first 4 symbols +++ __ inflate_lo32(tmp3, isLU ? tmp1 : tmp2); +++ __ mv(isLU ? tmp1 : tmp2, tmp3); +++ __ addi(str1, str1, isLU ? wordSize / 2 : wordSize); +++ __ addi(str2, str2, isLU ? wordSize : wordSize / 2); +++ __ sub(cnt2, cnt2, 8); // Already loaded 4 symbols. Last 4 is special case. +++ __ push_reg(spilled_regs, sp); +++ +++ if (isLU) { +++ __ add(str1, str1, cnt2); +++ __ shadd(str2, cnt2, str2, t0, 1); +++ } else { +++ __ shadd(str1, cnt2, str1, t0, 1); +++ __ add(str2, str2, cnt2); +++ } +++ __ xorr(tmp3, tmp1, tmp2); +++ __ mv(tmp5, tmp2); +++ __ bnez(tmp3, CALCULATE_DIFFERENCE); +++ +++ Register strU = isLU ? str2 : str1, +++ strL = isLU ? str1 : str2, +++ tmpU = isLU ? tmp5 : tmp1, // where to keep U for comparison +++ tmpL = isLU ? 
tmp1 : tmp5; // where to keep L for comparison +++ +++ __ sub(tmp2, strL, cnt2); // strL pointer to load from +++ __ slli(t0, cnt2, 1); +++ __ sub(cnt1, strU, t0); // strU pointer to load from +++ +++ __ ld(tmp4, Address(cnt1)); +++ __ addi(cnt1, cnt1, 8); +++ __ beqz(cnt2, LOAD_LAST); // no characters left except last load +++ __ sub(cnt2, cnt2, 16); +++ __ bltz(cnt2, TAIL); +++ __ bind(SMALL_LOOP); // smaller loop +++ __ sub(cnt2, cnt2, 16); +++ compare_string_8_x_LU(tmpL, tmpU, DIFF1, DIFF2); +++ compare_string_8_x_LU(tmpL, tmpU, DIFF1, DIFF2); +++ __ bgez(cnt2, SMALL_LOOP); +++ __ addi(t0, cnt2, 16); +++ __ beqz(t0, LOAD_LAST); +++ __ bind(TAIL); // 1..15 characters left until last load (last 4 characters) +++ // Address of 8 bytes before last 4 characters in UTF-16 string +++ __ shadd(cnt1, cnt2, cnt1, t0, 1); +++ // Address of 16 bytes before last 4 characters in Latin1 string +++ __ add(tmp2, tmp2, cnt2); +++ __ ld(tmp4, Address(cnt1, -8)); +++ // last 16 characters before last load +++ compare_string_8_x_LU(tmpL, tmpU, DIFF1, DIFF2); +++ compare_string_8_x_LU(tmpL, tmpU, DIFF1, DIFF2); +++ __ j(LOAD_LAST); +++ __ bind(DIFF2); +++ __ mv(tmpU, tmp4); +++ __ bind(DIFF1); +++ __ mv(tmpL, t0); +++ __ j(CALCULATE_DIFFERENCE); +++ __ bind(LOAD_LAST); +++ // Last 4 UTF-16 characters are already pre-loaded into tmp4 by compare_string_8_x_LU. +++ // No need to load it again +++ __ mv(tmpU, tmp4); +++ __ ld(tmpL, Address(strL)); +++ __ inflate_lo32(tmp3, tmpL); +++ __ mv(tmpL, tmp3); +++ __ xorr(tmp3, tmpU, tmpL); +++ __ beqz(tmp3, DONE); +++ +++ // Find the first different characters in the longwords and +++ // compute their difference. +++ __ bind(CALCULATE_DIFFERENCE); +++ __ ctzc_bit(tmp4, tmp3); +++ __ srl(tmp1, tmp1, tmp4); +++ __ srl(tmp5, tmp5, tmp4); +++ __ andi(tmp1, tmp1, 0xFFFF); +++ __ andi(tmp5, tmp5, 0xFFFF); +++ __ sub(result, tmp1, tmp5); +++ __ bind(DONE); +++ __ pop_reg(spilled_regs, sp); +++ __ ret(); +++ return entry; +++ } +++ +++ address generate_method_entry_barrier() { +++ __ align(CodeEntryAlignment); +++ StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier"); +++ +++ Label deoptimize_label; +++ +++ address start = __ pc(); +++ +++ __ set_last_Java_frame(sp, fp, ra, t0); +++ +++ __ enter(); +++ __ add(t1, sp, wordSize); +++ +++ __ sub(sp, sp, 4 * wordSize); +++ +++ __ push_call_clobbered_registers(); +++ +++ __ mv(c_rarg0, t1); +++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetNMethod::nmethod_stub_entry_barrier), 1); +++ +++ __ reset_last_Java_frame(true); +++ +++ __ mv(t0, x10); +++ +++ __ pop_call_clobbered_registers(); +++ +++ __ bnez(t0, deoptimize_label); +++ +++ __ leave(); +++ __ ret(); +++ +++ __ BIND(deoptimize_label); +++ +++ __ ld(t0, Address(sp, 0)); +++ __ ld(fp, Address(sp, wordSize)); +++ __ ld(ra, Address(sp, wordSize * 2)); +++ __ ld(t1, Address(sp, wordSize * 3)); +++ +++ __ mv(sp, t0); +++ __ jr(t1); +++ +++ return start; +++ } +++ +++ // x10 = result +++ // x11 = str1 +++ // x12 = cnt1 +++ // x13 = str2 +++ // x14 = cnt2 +++ // x28 = tmp1 +++ // x29 = tmp2 +++ // x30 = tmp3 +++ // x31 = tmp4 +++ address generate_compare_long_string_same_encoding(bool isLL) { +++ __ align(CodeEntryAlignment); +++ StubCodeMark mark(this, "StubRoutines", isLL ? 
+++ "compare_long_string_same_encoding LL" : "compare_long_string_same_encoding UU"); +++ address entry = __ pc(); +++ Label SMALL_LOOP, CHECK_LAST, DIFF2, TAIL, +++ LENGTH_DIFF, DIFF, LAST_CHECK_AND_LENGTH_DIFF; +++ const Register result = x10, str1 = x11, cnt1 = x12, str2 = x13, cnt2 = x14, +++ tmp1 = x28, tmp2 = x29, tmp3 = x30, tmp4 = x7, tmp5 = x31; +++ RegSet spilled_regs = RegSet::of(tmp4, tmp5); +++ +++ // cnt1/cnt2 contains amount of characters to compare. cnt1 can be re-used +++ // update cnt2 counter with already loaded 8 bytes +++ __ sub(cnt2, cnt2, wordSize / (isLL ? 1 : 2)); +++ // update pointers, because of previous read +++ __ add(str1, str1, wordSize); +++ __ add(str2, str2, wordSize); +++ // less than 16 bytes left? +++ __ sub(cnt2, cnt2, isLL ? 16 : 8); +++ __ push_reg(spilled_regs, sp); +++ __ bltz(cnt2, TAIL); +++ __ bind(SMALL_LOOP); +++ compare_string_16_bytes_same(DIFF, DIFF2); +++ __ sub(cnt2, cnt2, isLL ? 16 : 8); +++ __ bgez(cnt2, SMALL_LOOP); +++ __ bind(TAIL); +++ __ addi(cnt2, cnt2, isLL ? 16 : 8); +++ __ beqz(cnt2, LAST_CHECK_AND_LENGTH_DIFF); +++ __ sub(cnt2, cnt2, isLL ? 8 : 4); +++ __ blez(cnt2, CHECK_LAST); +++ __ xorr(tmp4, tmp1, tmp2); +++ __ bnez(tmp4, DIFF); +++ __ ld(tmp1, Address(str1)); +++ __ addi(str1, str1, 8); +++ __ ld(tmp2, Address(str2)); +++ __ addi(str2, str2, 8); +++ __ sub(cnt2, cnt2, isLL ? 8 : 4); +++ __ bind(CHECK_LAST); +++ if (!isLL) { +++ __ add(cnt2, cnt2, cnt2); // now in bytes +++ } +++ __ xorr(tmp4, tmp1, tmp2); +++ __ bnez(tmp4, DIFF); +++ __ add(str1, str1, cnt2); +++ __ ld(tmp5, Address(str1)); +++ __ add(str2, str2, cnt2); +++ __ ld(cnt1, Address(str2)); +++ __ xorr(tmp4, tmp5, cnt1); +++ __ beqz(tmp4, LENGTH_DIFF); +++ // Find the first different characters in the longwords and +++ // compute their difference. +++ __ bind(DIFF2); +++ __ ctzc_bit(tmp3, tmp4, isLL); // count zero from lsb to msb +++ __ srl(tmp5, tmp5, tmp3); +++ __ srl(cnt1, cnt1, tmp3); +++ if (isLL) { +++ __ andi(tmp5, tmp5, 0xFF); +++ __ andi(cnt1, cnt1, 0xFF); +++ } else { +++ __ andi(tmp5, tmp5, 0xFFFF); +++ __ andi(cnt1, cnt1, 0xFFFF); +++ } +++ __ sub(result, tmp5, cnt1); +++ __ j(LENGTH_DIFF); +++ __ bind(DIFF); +++ __ ctzc_bit(tmp3, tmp4, isLL); // count zero from lsb to msb +++ __ srl(tmp1, tmp1, tmp3); +++ __ srl(tmp2, tmp2, tmp3); +++ if (isLL) { +++ __ andi(tmp1, tmp1, 0xFF); +++ __ andi(tmp2, tmp2, 0xFF); +++ } else { +++ __ andi(tmp1, tmp1, 0xFFFF); +++ __ andi(tmp2, tmp2, 0xFFFF); +++ } +++ __ sub(result, tmp1, tmp2); +++ __ j(LENGTH_DIFF); +++ __ bind(LAST_CHECK_AND_LENGTH_DIFF); +++ __ xorr(tmp4, tmp1, tmp2); +++ __ bnez(tmp4, DIFF); +++ __ bind(LENGTH_DIFF); +++ __ pop_reg(spilled_regs, sp); +++ __ ret(); +++ return entry; +++ } +++ +++ void generate_compare_long_strings() { +++ StubRoutines::riscv::_compare_long_string_LL = generate_compare_long_string_same_encoding(true); +++ StubRoutines::riscv::_compare_long_string_UU = generate_compare_long_string_same_encoding(false); +++ StubRoutines::riscv::_compare_long_string_LU = generate_compare_long_string_different_encoding(true); +++ StubRoutines::riscv::_compare_long_string_UL = generate_compare_long_string_different_encoding(false); +++ } +++ +++ // x10 result +++ // x11 src +++ // x12 src count +++ // x13 pattern +++ // x14 pattern count +++ address generate_string_indexof_linear(bool needle_isL, bool haystack_isL) +++ { +++ const char* stubName = needle_isL +++ ? (haystack_isL ? 
"indexof_linear_ll" : "indexof_linear_ul") +++ : "indexof_linear_uu"; +++ __ align(CodeEntryAlignment); +++ StubCodeMark mark(this, "StubRoutines", stubName); +++ address entry = __ pc(); +++ +++ int needle_chr_size = needle_isL ? 1 : 2; +++ int haystack_chr_size = haystack_isL ? 1 : 2; +++ int needle_chr_shift = needle_isL ? 0 : 1; +++ int haystack_chr_shift = haystack_isL ? 0 : 1; +++ bool isL = needle_isL && haystack_isL; +++ // parameters +++ Register result = x10, haystack = x11, haystack_len = x12, needle = x13, needle_len = x14; +++ // temporary registers +++ Register mask1 = x20, match_mask = x21, first = x22, trailing_zeros = x23, mask2 = x24, tmp = x25; +++ // redefinitions +++ Register ch1 = x28, ch2 = x29; +++ RegSet spilled_regs = RegSet::range(x20, x25) + RegSet::range(x28, x29); +++ +++ __ push_reg(spilled_regs, sp); +++ +++ Label L_LOOP, L_LOOP_PROCEED, L_SMALL, L_HAS_ZERO, +++ L_HAS_ZERO_LOOP, L_CMP_LOOP, L_CMP_LOOP_NOMATCH, L_SMALL_PROCEED, +++ L_SMALL_HAS_ZERO_LOOP, L_SMALL_CMP_LOOP_NOMATCH, L_SMALL_CMP_LOOP, +++ L_POST_LOOP, L_CMP_LOOP_LAST_CMP, L_HAS_ZERO_LOOP_NOMATCH, +++ L_SMALL_CMP_LOOP_LAST_CMP, L_SMALL_CMP_LOOP_LAST_CMP2, +++ L_CMP_LOOP_LAST_CMP2, DONE, NOMATCH; +++ +++ __ ld(ch1, Address(needle)); +++ __ ld(ch2, Address(haystack)); +++ // src.length - pattern.length +++ __ sub(haystack_len, haystack_len, needle_len); +++ +++ // first is needle[0] +++ __ andi(first, ch1, needle_isL ? 0xFF : 0xFFFF, first); +++ uint64_t mask0101 = UCONST64(0x0101010101010101); +++ uint64_t mask0001 = UCONST64(0x0001000100010001); +++ __ mv(mask1, haystack_isL ? mask0101 : mask0001); +++ __ mul(first, first, mask1); +++ uint64_t mask7f7f = UCONST64(0x7f7f7f7f7f7f7f7f); +++ uint64_t mask7fff = UCONST64(0x7fff7fff7fff7fff); +++ __ mv(mask2, haystack_isL ? 
mask7f7f : mask7fff); +++ if (needle_isL != haystack_isL) { +++ __ mv(tmp, ch1); +++ } +++ __ sub(haystack_len, haystack_len, wordSize / haystack_chr_size - 1); +++ __ blez(haystack_len, L_SMALL); +++ +++ if (needle_isL != haystack_isL) { +++ __ inflate_lo32(ch1, tmp, match_mask, trailing_zeros); +++ } +++ // xorr, sub, orr, notr, andr +++ // compare and set match_mask[i] with 0x80/0x8000 (Latin1/UTF16) if ch2[i] == first[i] +++ // eg: +++ // first: aa aa aa aa aa aa aa aa +++ // ch2: aa aa li nx jd ka aa aa +++ // match_mask: 80 80 00 00 00 00 80 80 +++ __ compute_match_mask(ch2, first, match_mask, mask1, mask2); +++ +++ // search first char of needle, if success, goto L_HAS_ZERO; +++ __ bnez(match_mask, L_HAS_ZERO); +++ __ sub(haystack_len, haystack_len, wordSize / haystack_chr_size); +++ __ add(result, result, wordSize / haystack_chr_size); +++ __ add(haystack, haystack, wordSize); +++ __ bltz(haystack_len, L_POST_LOOP); +++ +++ __ bind(L_LOOP); +++ __ ld(ch2, Address(haystack)); +++ __ compute_match_mask(ch2, first, match_mask, mask1, mask2); +++ __ bnez(match_mask, L_HAS_ZERO); +++ +++ __ bind(L_LOOP_PROCEED); +++ __ sub(haystack_len, haystack_len, wordSize / haystack_chr_size); +++ __ add(haystack, haystack, wordSize); +++ __ add(result, result, wordSize / haystack_chr_size); +++ __ bgez(haystack_len, L_LOOP); +++ +++ __ bind(L_POST_LOOP); +++ __ mv(ch2, -wordSize / haystack_chr_size); +++ __ ble(haystack_len, ch2, NOMATCH); // no extra characters to check +++ __ ld(ch2, Address(haystack)); +++ __ slli(haystack_len, haystack_len, LogBitsPerByte + haystack_chr_shift); +++ __ neg(haystack_len, haystack_len); +++ __ xorr(ch2, first, ch2); +++ __ sub(match_mask, ch2, mask1); +++ __ orr(ch2, ch2, mask2); +++ __ mv(trailing_zeros, -1); // all bits set +++ __ j(L_SMALL_PROCEED); +++ +++ __ align(OptoLoopAlignment); +++ __ bind(L_SMALL); +++ __ slli(haystack_len, haystack_len, LogBitsPerByte + haystack_chr_shift); +++ __ neg(haystack_len, haystack_len); +++ if (needle_isL != haystack_isL) { +++ __ inflate_lo32(ch1, tmp, match_mask, trailing_zeros); +++ } +++ __ xorr(ch2, first, ch2); +++ __ sub(match_mask, ch2, mask1); +++ __ orr(ch2, ch2, mask2); +++ __ mv(trailing_zeros, -1); // all bits set +++ +++ __ bind(L_SMALL_PROCEED); +++ __ srl(trailing_zeros, trailing_zeros, haystack_len); // mask. zeroes on useless bits. +++ __ notr(ch2, ch2); +++ __ andr(match_mask, match_mask, ch2); +++ __ andr(match_mask, match_mask, trailing_zeros); // clear useless bits and check +++ __ beqz(match_mask, NOMATCH); +++ +++ __ bind(L_SMALL_HAS_ZERO_LOOP); +++ __ ctzc_bit(trailing_zeros, match_mask, haystack_isL, ch2, tmp); // count trailing zeros +++ __ addi(trailing_zeros, trailing_zeros, haystack_isL ? 7 : 15); +++ __ mv(ch2, wordSize / haystack_chr_size); +++ __ ble(needle_len, ch2, L_SMALL_CMP_LOOP_LAST_CMP2); +++ __ compute_index(haystack, trailing_zeros, match_mask, result, ch2, tmp, haystack_isL); +++ __ mv(trailing_zeros, wordSize / haystack_chr_size); +++ __ bne(ch1, ch2, L_SMALL_CMP_LOOP_NOMATCH); +++ +++ __ bind(L_SMALL_CMP_LOOP); +++ __ shadd(first, trailing_zeros, needle, first, needle_chr_shift); +++ __ shadd(ch2, trailing_zeros, haystack, ch2, haystack_chr_shift); +++ needle_isL ? __ lbu(first, Address(first)) : __ lhu(first, Address(first)); +++ haystack_isL ? 
__ lbu(ch2, Address(ch2)) : __ lhu(ch2, Address(ch2)); +++ __ add(trailing_zeros, trailing_zeros, 1); +++ __ bge(trailing_zeros, needle_len, L_SMALL_CMP_LOOP_LAST_CMP); +++ __ beq(first, ch2, L_SMALL_CMP_LOOP); +++ +++ __ bind(L_SMALL_CMP_LOOP_NOMATCH); +++ __ beqz(match_mask, NOMATCH); +++ __ ctzc_bit(trailing_zeros, match_mask, haystack_isL, tmp, ch2); +++ __ addi(trailing_zeros, trailing_zeros, haystack_isL ? 7 : 15); +++ __ add(result, result, 1); +++ __ add(haystack, haystack, haystack_chr_size); +++ __ j(L_SMALL_HAS_ZERO_LOOP); +++ +++ __ align(OptoLoopAlignment); +++ __ bind(L_SMALL_CMP_LOOP_LAST_CMP); +++ __ bne(first, ch2, L_SMALL_CMP_LOOP_NOMATCH); +++ __ j(DONE); +++ +++ __ align(OptoLoopAlignment); +++ __ bind(L_SMALL_CMP_LOOP_LAST_CMP2); +++ __ compute_index(haystack, trailing_zeros, match_mask, result, ch2, tmp, haystack_isL); +++ __ bne(ch1, ch2, L_SMALL_CMP_LOOP_NOMATCH); +++ __ j(DONE); +++ +++ __ align(OptoLoopAlignment); +++ __ bind(L_HAS_ZERO); +++ __ ctzc_bit(trailing_zeros, match_mask, haystack_isL, tmp, ch2); +++ __ addi(trailing_zeros, trailing_zeros, haystack_isL ? 7 : 15); +++ __ slli(needle_len, needle_len, BitsPerByte * wordSize / 2); +++ __ orr(haystack_len, haystack_len, needle_len); // restore needle_len(32bits) +++ __ sub(result, result, 1); // array index from 0, so result -= 1 +++ +++ __ bind(L_HAS_ZERO_LOOP); +++ __ mv(needle_len, wordSize / haystack_chr_size); +++ __ srli(ch2, haystack_len, BitsPerByte * wordSize / 2); +++ __ bge(needle_len, ch2, L_CMP_LOOP_LAST_CMP2); +++ // load next 8 bytes from haystack, and increase result index +++ __ compute_index(haystack, trailing_zeros, match_mask, result, ch2, tmp, haystack_isL); +++ __ add(result, result, 1); +++ __ mv(trailing_zeros, wordSize / haystack_chr_size); +++ __ bne(ch1, ch2, L_CMP_LOOP_NOMATCH); +++ +++ // compare one char +++ __ bind(L_CMP_LOOP); +++ __ shadd(needle_len, trailing_zeros, needle, needle_len, needle_chr_shift); +++ needle_isL ? __ lbu(needle_len, Address(needle_len)) : __ lhu(needle_len, Address(needle_len)); +++ __ shadd(ch2, trailing_zeros, haystack, ch2, haystack_chr_shift); +++ haystack_isL ? __ lbu(ch2, Address(ch2)) : __ lhu(ch2, Address(ch2)); +++ __ add(trailing_zeros, trailing_zeros, 1); // next char index +++ __ srli(tmp, haystack_len, BitsPerByte * wordSize / 2); +++ __ bge(trailing_zeros, tmp, L_CMP_LOOP_LAST_CMP); +++ __ beq(needle_len, ch2, L_CMP_LOOP); +++ +++ __ bind(L_CMP_LOOP_NOMATCH); +++ __ beqz(match_mask, L_HAS_ZERO_LOOP_NOMATCH); +++ __ ctzc_bit(trailing_zeros, match_mask, haystack_isL, needle_len, ch2); // find next "first" char index +++ __ addi(trailing_zeros, trailing_zeros, haystack_isL ? 7 : 15); +++ __ add(haystack, haystack, haystack_chr_size); +++ __ j(L_HAS_ZERO_LOOP); +++ +++ __ align(OptoLoopAlignment); +++ __ bind(L_CMP_LOOP_LAST_CMP); +++ __ bne(needle_len, ch2, L_CMP_LOOP_NOMATCH); +++ __ j(DONE); +++ +++ __ align(OptoLoopAlignment); +++ __ bind(L_CMP_LOOP_LAST_CMP2); +++ __ compute_index(haystack, trailing_zeros, match_mask, result, ch2, tmp, haystack_isL); +++ __ add(result, result, 1); +++ __ bne(ch1, ch2, L_CMP_LOOP_NOMATCH); +++ __ j(DONE); +++ +++ __ align(OptoLoopAlignment); +++ __ bind(L_HAS_ZERO_LOOP_NOMATCH); +++ // 1) Restore "result" index. Index was wordSize/str2_chr_size * N until +++ // L_HAS_ZERO block. Byte octet was analyzed in L_HAS_ZERO_LOOP, +++ // so, result was increased at max by wordSize/str2_chr_size - 1, so, +++ // respective high bit wasn't changed. 
L_LOOP_PROCEED will increase +++ // result by analyzed characters value, so, we can just reset lower bits +++ // in result here. Clear 2 lower bits for UU/UL and 3 bits for LL +++ // 2) restore needle_len and haystack_len values from "compressed" haystack_len +++ // 3) advance haystack value to represent next haystack octet. result & 7/3 is +++ // index of last analyzed substring inside current octet. So, haystack in at +++ // respective start address. We need to advance it to next octet +++ __ andi(match_mask, result, wordSize / haystack_chr_size - 1); +++ __ srli(needle_len, haystack_len, BitsPerByte * wordSize / 2); +++ __ andi(result, result, haystack_isL ? -8 : -4); +++ __ slli(tmp, match_mask, haystack_chr_shift); +++ __ sub(haystack, haystack, tmp); +++ __ sign_extend(haystack_len, haystack_len, 32); +++ __ j(L_LOOP_PROCEED); +++ +++ __ align(OptoLoopAlignment); +++ __ bind(NOMATCH); +++ __ mv(result, -1); +++ +++ __ bind(DONE); +++ __ pop_reg(spilled_regs, sp); +++ __ ret(); +++ return entry; +++ } +++ +++ void generate_string_indexof_stubs() +++ { +++ StubRoutines::riscv::_string_indexof_linear_ll = generate_string_indexof_linear(true, true); +++ StubRoutines::riscv::_string_indexof_linear_uu = generate_string_indexof_linear(false, false); +++ StubRoutines::riscv::_string_indexof_linear_ul = generate_string_indexof_linear(true, false); +++ } +++ +++#ifdef COMPILER2 +++ address generate_mulAdd() +++ { +++ __ align(CodeEntryAlignment); +++ StubCodeMark mark(this, "StubRoutines", "mulAdd"); +++ +++ address entry = __ pc(); +++ +++ const Register out = x10; +++ const Register in = x11; +++ const Register offset = x12; +++ const Register len = x13; +++ const Register k = x14; +++ const Register tmp = x28; +++ +++ BLOCK_COMMENT("Entry:"); +++ __ enter(); +++ __ mul_add(out, in, offset, len, k, tmp); +++ __ leave(); +++ __ ret(); +++ +++ return entry; +++ } +++ +++ /** +++ * Arguments: +++ * +++ * Input: +++ * c_rarg0 - x address +++ * c_rarg1 - x length +++ * c_rarg2 - y address +++ * c_rarg3 - y length +++ * c_rarg4 - z address +++ * c_rarg5 - z length +++ */ +++ address generate_multiplyToLen() +++ { +++ __ align(CodeEntryAlignment); +++ StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); +++ address entry = __ pc(); +++ +++ const Register x = x10; +++ const Register xlen = x11; +++ const Register y = x12; +++ const Register ylen = x13; +++ const Register z = x14; +++ const Register zlen = x15; +++ +++ const Register tmp1 = x16; +++ const Register tmp2 = x17; +++ const Register tmp3 = x7; +++ const Register tmp4 = x28; +++ const Register tmp5 = x29; +++ const Register tmp6 = x30; +++ const Register tmp7 = x31; +++ +++ BLOCK_COMMENT("Entry:"); +++ __ enter(); // required for proper stackwalking of RuntimeStub frame +++ __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7); +++ __ leave(); // required for proper stackwalking of RuntimeStub frame +++ __ ret(); +++ +++ return entry; +++ } +++ +++ address generate_squareToLen() +++ { +++ __ align(CodeEntryAlignment); +++ StubCodeMark mark(this, "StubRoutines", "squareToLen"); +++ address entry = __ pc(); +++ +++ const Register x = x10; +++ const Register xlen = x11; +++ const Register z = x12; +++ const Register zlen = x13; +++ const Register y = x14; // == x +++ const Register ylen = x15; // == xlen +++ +++ const Register tmp1 = x16; +++ const Register tmp2 = x17; +++ const Register tmp3 = x7; +++ const Register tmp4 = x28; +++ const Register tmp5 = x29; +++ const Register tmp6 = x30; +++ const 
Register tmp7 = x31; +++ +++ BLOCK_COMMENT("Entry:"); +++ __ enter(); +++ __ mv(y, x); +++ __ mv(ylen, xlen); +++ __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7); +++ __ leave(); +++ __ ret(); +++ +++ return entry; +++ } +++ +++ // Arguments: +++ // +++ // Input: +++ // c_rarg0 - newArr address +++ // c_rarg1 - oldArr address +++ // c_rarg2 - newIdx +++ // c_rarg3 - shiftCount +++ // c_rarg4 - numIter +++ // +++ address generate_bigIntegerLeftShift() { +++ __ align(CodeEntryAlignment); +++ StubCodeMark mark(this, "StubRoutines", "bigIntegerLeftShiftWorker"); +++ address entry = __ pc(); +++ +++ Label loop, exit; +++ +++ Register newArr = c_rarg0; +++ Register oldArr = c_rarg1; +++ Register newIdx = c_rarg2; +++ Register shiftCount = c_rarg3; +++ Register numIter = c_rarg4; +++ +++ Register shiftRevCount = c_rarg5; +++ Register oldArrNext = t1; +++ +++ __ beqz(numIter, exit); +++ __ shadd(newArr, newIdx, newArr, t0, 2); +++ +++ __ mv(shiftRevCount, 32); +++ __ sub(shiftRevCount, shiftRevCount, shiftCount); +++ +++ __ bind(loop); +++ __ addi(oldArrNext, oldArr, 4); +++ __ vsetvli(t0, numIter, Assembler::e32, Assembler::m4); +++ __ vle32_v(v0, oldArr); +++ __ vle32_v(v4, oldArrNext); +++ __ vsll_vx(v0, v0, shiftCount); +++ __ vsrl_vx(v4, v4, shiftRevCount); +++ __ vor_vv(v0, v0, v4); +++ __ vse32_v(v0, newArr); +++ __ sub(numIter, numIter, t0); +++ __ shadd(oldArr, t0, oldArr, t1, 2); +++ __ shadd(newArr, t0, newArr, t1, 2); +++ __ bnez(numIter, loop); +++ +++ __ bind(exit); +++ __ ret(); +++ +++ return entry; +++ } +++ +++ // Arguments: +++ // +++ // Input: +++ // c_rarg0 - newArr address +++ // c_rarg1 - oldArr address +++ // c_rarg2 - newIdx +++ // c_rarg3 - shiftCount +++ // c_rarg4 - numIter +++ // +++ address generate_bigIntegerRightShift() { +++ __ align(CodeEntryAlignment); +++ StubCodeMark mark(this, "StubRoutines", "bigIntegerRightShiftWorker"); +++ address entry = __ pc(); +++ +++ Label loop, exit; +++ +++ Register newArr = c_rarg0; +++ Register oldArr = c_rarg1; +++ Register newIdx = c_rarg2; +++ Register shiftCount = c_rarg3; +++ Register numIter = c_rarg4; +++ Register idx = numIter; +++ +++ Register shiftRevCount = c_rarg5; +++ Register oldArrNext = c_rarg6; +++ Register newArrCur = t0; +++ Register oldArrCur = t1; +++ +++ __ beqz(idx, exit); +++ __ shadd(newArr, newIdx, newArr, t0, 2); +++ +++ __ mv(shiftRevCount, 32); +++ __ sub(shiftRevCount, shiftRevCount, shiftCount); +++ +++ __ bind(loop); +++ __ vsetvli(t0, idx, Assembler::e32, Assembler::m4); +++ __ sub(idx, idx, t0); +++ __ shadd(oldArrNext, idx, oldArr, t1, 2); +++ __ shadd(newArrCur, idx, newArr, t1, 2); +++ __ addi(oldArrCur, oldArrNext, 4); +++ __ vle32_v(v0, oldArrCur); +++ __ vle32_v(v4, oldArrNext); +++ __ vsrl_vx(v0, v0, shiftCount); +++ __ vsll_vx(v4, v4, shiftRevCount); +++ __ vor_vv(v0, v0, v4); +++ __ vse32_v(v0, newArrCur); +++ __ bnez(idx, loop); +++ +++ __ bind(exit); +++ __ ret(); +++ +++ return entry; +++ } +++#endif +++ +++#ifdef COMPILER2 +++ class MontgomeryMultiplyGenerator : public MacroAssembler { +++ +++ Register Pa_base, Pb_base, Pn_base, Pm_base, inv, Rlen, Ra, Rb, Rm, Rn, +++ Pa, Pb, Pn, Pm, Rhi_ab, Rlo_ab, Rhi_mn, Rlo_mn, tmp0, tmp1, tmp2, Ri, Rj; +++ +++ RegSet _toSave; +++ bool _squaring; +++ +++ public: +++ MontgomeryMultiplyGenerator (Assembler *as, bool squaring) +++ : MacroAssembler(as->code()), _squaring(squaring) { +++ +++ // Register allocation +++ +++ Register reg = c_rarg0; +++ Pa_base = reg; // Argument registers +++ if (squaring) { +++ 
Pb_base = Pa_base;
+++      } else {
+++        Pb_base = ++reg;
+++      }
+++      Pn_base = ++reg;
+++      Rlen = ++reg;
+++      inv = ++reg;
+++      Pm_base = ++reg;
+++
+++      // Working registers:
+++      Ra = ++reg; // The current digit of a, b, n, and m.
+++      Rb = ++reg;
+++      Rm = ++reg;
+++      Rn = ++reg;
+++
+++      Pa = ++reg; // Pointers to the current/next digit of a, b, n, and m.
+++      Pb = ++reg;
+++      Pm = ++reg;
+++      Pn = ++reg;
+++
+++      tmp0 = ++reg; // Three registers which form a
+++      tmp1 = ++reg; // triple-precision accumulator.
+++      tmp2 = ++reg;
+++
+++      Ri = x6; // Inner and outer loop indexes.
+++      Rj = x7;
+++
+++      Rhi_ab = x28; // Product registers: low and high parts
+++      Rlo_ab = x29; // of a*b and m*n.
+++      Rhi_mn = x30;
+++      Rlo_mn = x31;
+++
+++      // x18 and up are callee-saved.
+++      _toSave = RegSet::range(x18, reg) + Pm_base;
+++    }
+++
+++  private:
+++    void save_regs() {
+++      push_reg(_toSave, sp);
+++    }
+++
+++    void restore_regs() {
+++      pop_reg(_toSave, sp);
+++    }
+++
+++    template<typename T>
+++    void unroll_2(Register count, T block) {
+++      Label loop, end, odd;
+++      beqz(count, end);
+++      test_bit(t0, count, 0);
+++      bnez(t0, odd);
+++      align(16);
+++      bind(loop);
+++      (this->*block)();
+++      bind(odd);
+++      (this->*block)();
+++      addi(count, count, -2);
+++      bgtz(count, loop);
+++      bind(end);
+++    }
+++
+++    template<typename T>
+++    void unroll_2(Register count, T block, Register d, Register s, Register tmp) {
+++      Label loop, end, odd;
+++      beqz(count, end);
+++      test_bit(tmp, count, 0);
+++      bnez(tmp, odd);
+++      align(16);
+++      bind(loop);
+++      (this->*block)(d, s, tmp);
+++      bind(odd);
+++      (this->*block)(d, s, tmp);
+++      addi(count, count, -2);
+++      bgtz(count, loop);
+++      bind(end);
+++    }
+++
+++    void pre1(RegisterOrConstant i) {
+++      block_comment("pre1");
+++      // Pa = Pa_base;
+++      // Pb = Pb_base + i;
+++      // Pm = Pm_base;
+++      // Pn = Pn_base + i;
+++      // Ra = *Pa;
+++      // Rb = *Pb;
+++      // Rm = *Pm;
+++      // Rn = *Pn;
+++      if (i.is_register()) {
+++        slli(t0, i.as_register(), LogBytesPerWord);
+++      } else {
+++        mv(t0, i.as_constant());
+++        slli(t0, t0, LogBytesPerWord);
+++      }
+++
+++      mv(Pa, Pa_base);
+++      add(Pb, Pb_base, t0);
+++      mv(Pm, Pm_base);
+++      add(Pn, Pn_base, t0);
+++
+++      ld(Ra, Address(Pa));
+++      ld(Rb, Address(Pb));
+++      ld(Rm, Address(Pm));
+++      ld(Rn, Address(Pn));
+++
+++      // Zero the m*n result.
+++      mv(Rhi_mn, zr);
+++      mv(Rlo_mn, zr);
+++    }
+++
+++    // The core multiply-accumulate step of a Montgomery
+++    // multiplication. The idea is to schedule operations as a
+++    // pipeline so that instructions with long latencies (loads and
+++    // multiplies) have time to complete before their results are
+++    // used. This most benefits in-order implementations of the
+++    // architecture but out-of-order ones also benefit.
+++    void step() {
+++      block_comment("step");
+++      // MACC(Ra, Rb, tmp0, tmp1, tmp2);
+++      // Ra = *++Pa;
+++      // Rb = *--Pb;
+++      mulhu(Rhi_ab, Ra, Rb);
+++      mul(Rlo_ab, Ra, Rb);
+++      addi(Pa, Pa, wordSize);
+++      ld(Ra, Address(Pa));
+++      addi(Pb, Pb, -wordSize);
+++      ld(Rb, Address(Pb));
+++      acc(Rhi_mn, Rlo_mn, tmp0, tmp1, tmp2); // The pending m*n from the
+++                                             // previous iteration.
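+++
+++      // For reference: MACC(a, b, t0, t1, t2) in the pseudocode comments is
+++      // a 64x64->128-bit multiply-accumulate into the triple-precision
+++      // accumulator t2:t1:t0; a plain-C sketch (variable names hypothetical):
+++      //   unsigned __int128 p = (unsigned __int128)a * b;
+++      //   uint64_t lo = (uint64_t)p, hi = (uint64_t)(p >> 64);
+++      //   t0 += lo;     uint64_t c = (t0 < lo);           // cad
+++      //   t1 += hi + c; c = (t1 < hi) || (c && t1 == hi); // cadc
+++      //   t2 += c;                                        // adc
+++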
+++      // MACC(Rm, Rn, tmp0, tmp1, tmp2);
+++      // Rm = *++Pm;
+++      // Rn = *--Pn;
+++      mulhu(Rhi_mn, Rm, Rn);
+++      mul(Rlo_mn, Rm, Rn);
+++      addi(Pm, Pm, wordSize);
+++      ld(Rm, Address(Pm));
+++      addi(Pn, Pn, -wordSize);
+++      ld(Rn, Address(Pn));
+++      acc(Rhi_ab, Rlo_ab, tmp0, tmp1, tmp2);
+++    }
+++
+++    void post1() {
+++      block_comment("post1");
+++
+++      // MACC(Ra, Rb, tmp0, tmp1, tmp2);
+++      // Ra = *++Pa;
+++      // Rb = *--Pb;
+++      mulhu(Rhi_ab, Ra, Rb);
+++      mul(Rlo_ab, Ra, Rb);
+++      acc(Rhi_mn, Rlo_mn, tmp0, tmp1, tmp2); // The pending m*n
+++      acc(Rhi_ab, Rlo_ab, tmp0, tmp1, tmp2);
+++
+++      // *Pm = Rm = tmp0 * inv;
+++      mul(Rm, tmp0, inv);
+++      sd(Rm, Address(Pm));
+++
+++      // MACC(Rm, Rn, tmp0, tmp1, tmp2);
+++      // tmp0 = tmp1; tmp1 = tmp2; tmp2 = 0;
+++      mulhu(Rhi_mn, Rm, Rn);
+++
+++#ifndef PRODUCT
+++      // assert(m[i] * n[0] + tmp0 == 0, "broken Montgomery multiply");
+++      {
+++        mul(Rlo_mn, Rm, Rn);
+++        add(Rlo_mn, tmp0, Rlo_mn);
+++        Label ok;
+++        beqz(Rlo_mn, ok);
+++        stop("broken Montgomery multiply");
+++        bind(ok);
+++      }
+++#endif
+++      // We have very carefully set things up so that
+++      // m[i]*n[0] + tmp0 == 0 (mod b), so we don't have to calculate
+++      // the lower half of Rm * Rn because we know the result already:
+++      // it must be -tmp0. tmp0 + (-tmp0) must generate a carry iff
+++      // tmp0 != 0. So, rather than do a mul and a cad we just set
+++      // the carry flag iff tmp0 is nonzero.
+++      //
+++      // mul(Rlo_mn, Rm, Rn);
+++      // cad(zr, tmp0, Rlo_mn);
+++      addi(t0, tmp0, -1);
+++      sltu(t0, t0, tmp0); // Set carry iff tmp0 is nonzero
+++      cadc(tmp0, tmp1, Rhi_mn, t0);
+++      adc(tmp1, tmp2, zr, t0);
+++      mv(tmp2, zr);
+++    }
+++
+++    void pre2(Register i, Register len) {
+++      block_comment("pre2");
+++      // Pa = Pa_base + i-len;
+++      // Pb = Pb_base + len;
+++      // Pm = Pm_base + i-len;
+++      // Pn = Pn_base + len;
+++
+++      sub(Rj, i, len);
+++      // Rj == i-len
+++
+++      // Ra as temp register
+++      slli(Ra, Rj, LogBytesPerWord);
+++      add(Pa, Pa_base, Ra);
+++      add(Pm, Pm_base, Ra);
+++      slli(Ra, len, LogBytesPerWord);
+++      add(Pb, Pb_base, Ra);
+++      add(Pn, Pn_base, Ra);
+++
+++      // Ra = *++Pa;
+++      // Rb = *--Pb;
+++      // Rm = *++Pm;
+++      // Rn = *--Pn;
+++      add(Pa, Pa, wordSize);
+++      ld(Ra, Address(Pa));
+++      add(Pb, Pb, -wordSize);
+++      ld(Rb, Address(Pb));
+++      add(Pm, Pm, wordSize);
+++      ld(Rm, Address(Pm));
+++      add(Pn, Pn, -wordSize);
+++      ld(Rn, Address(Pn));
+++
+++      mv(Rhi_mn, zr);
+++      mv(Rlo_mn, zr);
+++    }
+++
+++    void post2(Register i, Register len) {
+++      block_comment("post2");
+++      sub(Rj, i, len);
+++
+++      cad(tmp0, tmp0, Rlo_mn, t0); // The pending m*n, low part
+++
+++      // As soon as we know the least significant digit of our result,
+++      // store it.
+++      // Pm_base[i-len] = tmp0;
+++      // Rj as temp register
+++      slli(Rj, Rj, LogBytesPerWord);
+++      add(Rj, Pm_base, Rj);
+++      sd(tmp0, Address(Rj));
+++
+++      // tmp0 = tmp1; tmp1 = tmp2; tmp2 = 0;
+++      cadc(tmp0, tmp1, Rhi_mn, t0); // The pending m*n, high part
+++      adc(tmp1, tmp2, zr, t0);
+++      mv(tmp2, zr);
+++    }
+++
+++    // A carry in tmp0 after Montgomery multiplication means that we
+++    // should subtract multiples of n from our result in m. We'll
+++    // keep doing that until there is no carry.
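+++    // In plain C the loop below is roughly (a sketch; m and n are
+++    // little-endian arrays of len 64-bit words, tmp0 the carry count):
+++    //   while (tmp0 != 0) {
+++    //     uint64_t nb = 1;                    // t0: 1 means "no borrow"
+++    //     for (size_t i = 0; i < len; i++) {  // m -= n, as m + ~n + carry
+++    //       uint64_t s = m[i] + ~n[i] + nb;
+++    //       nb = /* carry-out of this addition */;
+++    //       m[i] = s;
+++    //     }
+++    //     tmp0 = tmp0 - 1 + nb;               // i.e. tmp0 -= borrow
+++    //   }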
+++ void normalize(Register len) { +++ block_comment("normalize"); +++ // while (tmp0) +++ // tmp0 = sub(Pm_base, Pn_base, tmp0, len); +++ Label loop, post, again; +++ Register cnt = tmp1, i = tmp2; // Re-use registers; we're done with them now +++ beqz(tmp0, post); { +++ bind(again); { +++ mv(i, zr); +++ mv(cnt, len); +++ slli(Rn, i, LogBytesPerWord); +++ add(Rm, Pm_base, Rn); +++ ld(Rm, Address(Rm)); +++ add(Rn, Pn_base, Rn); +++ ld(Rn, Address(Rn)); +++ mv(t0, 1); // set carry flag, i.e. no borrow +++ align(16); +++ bind(loop); { +++ notr(Rn, Rn); +++ add(Rm, Rm, t0); +++ add(Rm, Rm, Rn); +++ sltu(t0, Rm, Rn); +++ slli(Rn, i, LogBytesPerWord); // Rn as temp register +++ add(Rn, Pm_base, Rn); +++ sd(Rm, Address(Rn)); +++ add(i, i, 1); +++ slli(Rn, i, LogBytesPerWord); +++ add(Rm, Pm_base, Rn); +++ ld(Rm, Address(Rm)); +++ add(Rn, Pn_base, Rn); +++ ld(Rn, Address(Rn)); +++ sub(cnt, cnt, 1); +++ } bnez(cnt, loop); +++ addi(tmp0, tmp0, -1); +++ add(tmp0, tmp0, t0); +++ } bnez(tmp0, again); +++ } bind(post); +++ } +++ +++ // Move memory at s to d, reversing words. +++ // Increments d to end of copied memory +++ // Destroys tmp1, tmp2 +++ // Preserves len +++ // Leaves s pointing to the address which was in d at start +++ void reverse(Register d, Register s, Register len, Register tmp1, Register tmp2) { +++ assert(tmp1 < x28 && tmp2 < x28, "register corruption"); +++ +++ slli(tmp1, len, LogBytesPerWord); +++ add(s, s, tmp1); +++ mv(tmp1, len); +++ unroll_2(tmp1, &MontgomeryMultiplyGenerator::reverse1, d, s, tmp2); +++ slli(tmp1, len, LogBytesPerWord); +++ sub(s, d, tmp1); +++ } +++ // [63...0] -> [31...0][63...32] +++ void reverse1(Register d, Register s, Register tmp) { +++ addi(s, s, -wordSize); +++ ld(tmp, Address(s)); +++ ror_imm(tmp, tmp, 32, t0); +++ sd(tmp, Address(d)); +++ addi(d, d, wordSize); +++ } +++ +++ void step_squaring() { +++ // An extra ACC +++ step(); +++ acc(Rhi_ab, Rlo_ab, tmp0, tmp1, tmp2); +++ } +++ +++ void last_squaring(Register i) { +++ Label dont; +++ // if ((i & 1) == 0) { +++ test_bit(t0, i, 0); +++ bnez(t0, dont); { +++ // MACC(Ra, Rb, tmp0, tmp1, tmp2); +++ // Ra = *++Pa; +++ // Rb = *--Pb; +++ mulhu(Rhi_ab, Ra, Rb); +++ mul(Rlo_ab, Ra, Rb); +++ acc(Rhi_ab, Rlo_ab, tmp0, tmp1, tmp2); +++ } bind(dont); +++ } +++ +++ void extra_step_squaring() { +++ acc(Rhi_mn, Rlo_mn, tmp0, tmp1, tmp2); // The pending m*n +++ +++ // MACC(Rm, Rn, tmp0, tmp1, tmp2); +++ // Rm = *++Pm; +++ // Rn = *--Pn; +++ mulhu(Rhi_mn, Rm, Rn); +++ mul(Rlo_mn, Rm, Rn); +++ addi(Pm, Pm, wordSize); +++ ld(Rm, Address(Pm)); +++ addi(Pn, Pn, -wordSize); +++ ld(Rn, Address(Pn)); +++ } +++ +++ void post1_squaring() { +++ acc(Rhi_mn, Rlo_mn, tmp0, tmp1, tmp2); // The pending m*n +++ +++ // *Pm = Rm = tmp0 * inv; +++ mul(Rm, tmp0, inv); +++ sd(Rm, Address(Pm)); +++ +++ // MACC(Rm, Rn, tmp0, tmp1, tmp2); +++ // tmp0 = tmp1; tmp1 = tmp2; tmp2 = 0; +++ mulhu(Rhi_mn, Rm, Rn); +++ +++#ifndef PRODUCT +++ // assert(m[i] * n[0] + tmp0 == 0, "broken Montgomery multiply"); +++ { +++ mul(Rlo_mn, Rm, Rn); +++ add(Rlo_mn, tmp0, Rlo_mn); +++ Label ok; +++ beqz(Rlo_mn, ok); { +++ stop("broken Montgomery multiply"); +++ } bind(ok); +++ } +++#endif +++ // We have very carefully set things up so that +++ // m[i]*n[0] + tmp0 == 0 (mod b), so we don't have to calculate +++ // the lower half of Rm * Rn because we know the result already: +++ // it must be -tmp0. tmp0 + (-tmp0) must generate a carry iff +++ // tmp0 != 0. So, rather than do a mul and a cad we just set +++ // the carry flag iff tmp0 is nonzero. 
+++ // +++ // mul(Rlo_mn, Rm, Rn); +++ // cad(zr, tmp, Rlo_mn); +++ addi(t0, tmp0, -1); +++ sltu(t0, t0, tmp0); // Set carry iff tmp0 is nonzero +++ cadc(tmp0, tmp1, Rhi_mn, t0); +++ adc(tmp1, tmp2, zr, t0); +++ mv(tmp2, zr); +++ } +++ +++ // use t0 as carry +++ void acc(Register Rhi, Register Rlo, +++ Register tmp0, Register tmp1, Register tmp2) { +++ cad(tmp0, tmp0, Rlo, t0); +++ cadc(tmp1, tmp1, Rhi, t0); +++ adc(tmp2, tmp2, zr, t0); +++ } +++ +++ public: +++ /** +++ * Fast Montgomery multiplication. The derivation of the +++ * algorithm is in A Cryptographic Library for the Motorola +++ * DSP56000, Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237. +++ * +++ * Arguments: +++ * +++ * Inputs for multiplication: +++ * c_rarg0 - int array elements a +++ * c_rarg1 - int array elements b +++ * c_rarg2 - int array elements n (the modulus) +++ * c_rarg3 - int length +++ * c_rarg4 - int inv +++ * c_rarg5 - int array elements m (the result) +++ * +++ * Inputs for squaring: +++ * c_rarg0 - int array elements a +++ * c_rarg1 - int array elements n (the modulus) +++ * c_rarg2 - int length +++ * c_rarg3 - int inv +++ * c_rarg4 - int array elements m (the result) +++ * +++ */ +++ address generate_multiply() { +++ Label argh, nothing; +++ bind(argh); +++ stop("MontgomeryMultiply total_allocation must be <= 8192"); +++ +++ align(CodeEntryAlignment); +++ address entry = pc(); +++ +++ beqz(Rlen, nothing); +++ +++ enter(); +++ +++ // Make room. +++ mv(Ra, 512); +++ bgt(Rlen, Ra, argh); +++ slli(Ra, Rlen, exact_log2(4 * sizeof(jint))); +++ sub(Ra, sp, Ra); +++ andi(sp, Ra, -2 * wordSize); +++ +++ srliw(Rlen, Rlen, 1); // length in longwords = len/2 +++ +++ { +++ // Copy input args, reversing as we go. We use Ra as a +++ // temporary variable. +++ reverse(Ra, Pa_base, Rlen, Ri, Rj); +++ if (!_squaring) +++ reverse(Ra, Pb_base, Rlen, Ri, Rj); +++ reverse(Ra, Pn_base, Rlen, Ri, Rj); +++ } +++ +++ // Push all call-saved registers and also Pm_base which we'll need +++ // at the end. 
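+++
+++      // For orientation: the loops below implement the usual word-by-word
+++      // Montgomery multiply; as C pseudocode (a sketch, with t2:t1:t0 the
+++      // triple-precision accumulator and MACC as in the comments above):
+++      //   for (i = 0; i < len; i++) {
+++      //     for (j = 0; j < i; j++) {
+++      //       MACC(a[j], b[i-j], t0, t1, t2);
+++      //       MACC(m[j], n[i-j], t0, t1, t2);
+++      //     }
+++      //     MACC(a[i], b[0], t0, t1, t2);
+++      //     m[i] = t0 * inv;                  // forces column i to 0 mod 2^64
+++      //     MACC(m[i], n[0], t0, t1, t2);     // now t0 == 0
+++      //     t0 = t1; t1 = t2; t2 = 0;         // shift the accumulator
+++      //   }
+++      //   for (i = len; i < 2*len; i++) {
+++      //     for (j = i-len+1; j < len; j++) {
+++      //       MACC(a[j], b[i-j], t0, t1, t2);
+++      //       MACC(m[j], n[i-j], t0, t1, t2);
+++      //     }
+++      //     m[i-len] = t0;
+++      //     t0 = t1; t1 = t2; t2 = 0;
+++      //   }
+++      //   while (t0) t0 = sub(m, n, t0, len); // normalize
+++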
+++ save_regs(); +++ +++#ifndef PRODUCT +++ // assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply"); +++ { +++ ld(Rn, Address(Pn_base)); +++ mul(Rlo_mn, Rn, inv); +++ mv(t0, -1); +++ Label ok; +++ beq(Rlo_mn, t0, ok); +++ stop("broken inverse in Montgomery multiply"); +++ bind(ok); +++ } +++#endif +++ +++ mv(Pm_base, Ra); +++ +++ mv(tmp0, zr); +++ mv(tmp1, zr); +++ mv(tmp2, zr); +++ +++ block_comment("for (int i = 0; i < len; i++) {"); +++ mv(Ri, zr); { +++ Label loop, end; +++ bge(Ri, Rlen, end); +++ +++ bind(loop); +++ pre1(Ri); +++ +++ block_comment(" for (j = i; j; j--) {"); { +++ mv(Rj, Ri); +++ unroll_2(Rj, &MontgomeryMultiplyGenerator::step); +++ } block_comment(" } // j"); +++ +++ post1(); +++ addw(Ri, Ri, 1); +++ blt(Ri, Rlen, loop); +++ bind(end); +++ block_comment("} // i"); +++ } +++ +++ block_comment("for (int i = len; i < 2*len; i++) {"); +++ mv(Ri, Rlen); { +++ Label loop, end; +++ slli(t0, Rlen, 1); +++ bge(Ri, t0, end); +++ +++ bind(loop); +++ pre2(Ri, Rlen); +++ +++ block_comment(" for (j = len*2-i-1; j; j--) {"); { +++ slliw(Rj, Rlen, 1); +++ subw(Rj, Rj, Ri); +++ subw(Rj, Rj, 1); +++ unroll_2(Rj, &MontgomeryMultiplyGenerator::step); +++ } block_comment(" } // j"); +++ +++ post2(Ri, Rlen); +++ addw(Ri, Ri, 1); +++ slli(t0, Rlen, 1); +++ blt(Ri, t0, loop); +++ bind(end); +++ } +++ block_comment("} // i"); +++ +++ normalize(Rlen); +++ +++ mv(Ra, Pm_base); // Save Pm_base in Ra +++ restore_regs(); // Restore caller's Pm_base +++ +++ // Copy our result into caller's Pm_base +++ reverse(Pm_base, Ra, Rlen, Ri, Rj); +++ +++ leave(); +++ bind(nothing); +++ ret(); +++ +++ return entry; +++ } +++ +++ /** +++ * +++ * Arguments: +++ * +++ * Inputs: +++ * c_rarg0 - int array elements a +++ * c_rarg1 - int array elements n (the modulus) +++ * c_rarg2 - int length +++ * c_rarg3 - int inv +++ * c_rarg4 - int array elements m (the result) +++ * +++ */ +++ address generate_square() { +++ Label argh; +++ bind(argh); +++ stop("MontgomeryMultiply total_allocation must be <= 8192"); +++ +++ align(CodeEntryAlignment); +++ address entry = pc(); +++ +++ enter(); +++ +++ // Make room. +++ mv(Ra, 512); +++ bgt(Rlen, Ra, argh); +++ slli(Ra, Rlen, exact_log2(4 * sizeof(jint))); +++ sub(Ra, sp, Ra); +++ andi(sp, Ra, -2 * wordSize); +++ +++ srliw(Rlen, Rlen, 1); // length in longwords = len/2 +++ +++ { +++ // Copy input args, reversing as we go. We use Ra as a +++ // temporary variable. +++ reverse(Ra, Pa_base, Rlen, Ri, Rj); +++ reverse(Ra, Pn_base, Rlen, Ri, Rj); +++ } +++ +++ // Push all call-saved registers and also Pm_base which we'll need +++ // at the end. 
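+++
+++      // Squaring reuses the machinery above but roughly halves the
+++      // multiplies: each cross product a[i]*a[j] (i != j) is computed once
+++      // and accumulated twice (step_squaring), diagonal terms once
+++      // (last_squaring), while extra_step_squaring covers the m*n products,
+++      // which have no such symmetry.
+++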
+++ save_regs(); +++ +++ mv(Pm_base, Ra); +++ +++ mv(tmp0, zr); +++ mv(tmp1, zr); +++ mv(tmp2, zr); +++ +++ block_comment("for (int i = 0; i < len; i++) {"); +++ mv(Ri, zr); { +++ Label loop, end; +++ bind(loop); +++ bge(Ri, Rlen, end); +++ +++ pre1(Ri); +++ +++ block_comment("for (j = (i+1)/2; j; j--) {"); { +++ addi(Rj, Ri, 1); +++ srliw(Rj, Rj, 1); +++ unroll_2(Rj, &MontgomeryMultiplyGenerator::step_squaring); +++ } block_comment(" } // j"); +++ +++ last_squaring(Ri); +++ +++ block_comment(" for (j = i/2; j; j--) {"); { +++ srliw(Rj, Ri, 1); +++ unroll_2(Rj, &MontgomeryMultiplyGenerator::extra_step_squaring); +++ } block_comment(" } // j"); +++ +++ post1_squaring(); +++ addi(Ri, Ri, 1); +++ blt(Ri, Rlen, loop); +++ +++ bind(end); +++ block_comment("} // i"); +++ } +++ +++ block_comment("for (int i = len; i < 2*len; i++) {"); +++ mv(Ri, Rlen); { +++ Label loop, end; +++ bind(loop); +++ slli(t0, Rlen, 1); +++ bge(Ri, t0, end); +++ +++ pre2(Ri, Rlen); +++ +++ block_comment(" for (j = (2*len-i-1)/2; j; j--) {"); { +++ slli(Rj, Rlen, 1); +++ sub(Rj, Rj, Ri); +++ sub(Rj, Rj, 1); +++ srliw(Rj, Rj, 1); +++ unroll_2(Rj, &MontgomeryMultiplyGenerator::step_squaring); +++ } block_comment(" } // j"); +++ +++ last_squaring(Ri); +++ +++ block_comment(" for (j = (2*len-i)/2; j; j--) {"); { +++ slli(Rj, Rlen, 1); +++ sub(Rj, Rj, Ri); +++ srliw(Rj, Rj, 1); +++ unroll_2(Rj, &MontgomeryMultiplyGenerator::extra_step_squaring); +++ } block_comment(" } // j"); +++ +++ post2(Ri, Rlen); +++ addi(Ri, Ri, 1); +++ slli(t0, Rlen, 1); +++ blt(Ri, t0, loop); +++ +++ bind(end); +++ block_comment("} // i"); +++ } +++ +++ normalize(Rlen); +++ +++ mv(Ra, Pm_base); // Save Pm_base in Ra +++ restore_regs(); // Restore caller's Pm_base +++ +++ // Copy our result into caller's Pm_base +++ reverse(Pm_base, Ra, Rlen, Ri, Rj); +++ +++ leave(); +++ ret(); +++ +++ return entry; +++ } +++ }; +++#endif // COMPILER2 +++ +++ // Continuation point for throwing of implicit exceptions that are +++ // not handled in the current activation. Fabricates an exception +++ // oop and initiates normal exception dispatching in this +++ // frame. Since we need to preserve callee-saved values (currently +++ // only for C2, but done for C1 as well) we need a callee-saved oop +++ // map and therefore have to make these stubs into RuntimeStubs +++ // rather than BufferBlobs. If the compiler needs all registers to +++ // be preserved between the fault point and the exception handler +++ // then it must assume responsibility for that in +++ // AbstractCompiler::continuation_for_implicit_null_exception or +++ // continuation_for_implicit_division_by_zero_exception. All other +++ // implicit exceptions (e.g., NullPointerException or +++ // AbstractMethodError on entry) are either at call sites or +++ // otherwise assume that stack unwinding will be initiated, so +++ // caller saved registers were assumed volatile in the compiler. +++ +++#undef __ +++#define __ masm-> +++ +++ address generate_throw_exception(const char* name, +++ address runtime_entry, +++ Register arg1 = noreg, +++ Register arg2 = noreg) { +++ // Information about frame layout at time of blocking runtime call. +++ // Note that we only have to preserve callee-saved registers since +++ // the compilers are responsible for supplying a continuation point +++ // if they expect all registers to be preserved. +++ // n.b. 
riscv asserts that frame::arg_reg_save_area_bytes == 0 +++ assert_cond(runtime_entry != NULL); +++ enum layout { +++ fp_off = 0, +++ fp_off2, +++ return_off, +++ return_off2, +++ framesize // inclusive of return address +++ }; +++ +++ const int insts_size = 512; +++ const int locs_size = 64; +++ +++ CodeBuffer code(name, insts_size, locs_size); +++ OopMapSet* oop_maps = new OopMapSet(); +++ MacroAssembler* masm = new MacroAssembler(&code); +++ assert_cond(oop_maps != NULL && masm != NULL); +++ +++ address start = __ pc(); +++ +++ // This is an inlined and slightly modified version of call_VM +++ // which has the ability to fetch the return PC out of +++ // thread-local storage and also sets up last_Java_sp slightly +++ // differently than the real call_VM +++ +++ __ enter(); // Save FP and RA before call +++ +++ assert(is_even(framesize / 2), "sp not 16-byte aligned"); +++ +++ // ra and fp are already in place +++ __ addi(sp, fp, 0 - ((unsigned)framesize << LogBytesPerInt)); // prolog +++ +++ int frame_complete = __ pc() - start; +++ +++ // Set up last_Java_sp and last_Java_fp +++ address the_pc = __ pc(); +++ __ set_last_Java_frame(sp, fp, the_pc, t0); +++ +++ // Call runtime +++ if (arg1 != noreg) { +++ assert(arg2 != c_rarg1, "clobbered"); +++ __ mv(c_rarg1, arg1); +++ } +++ if (arg2 != noreg) { +++ __ mv(c_rarg2, arg2); +++ } +++ __ mv(c_rarg0, xthread); +++ BLOCK_COMMENT("call runtime_entry"); +++ __ call(runtime_entry); +++ +++ // Generate oop map +++ OopMap* map = new OopMap(framesize, 0); +++ assert_cond(map != NULL); +++ +++ oop_maps->add_gc_map(the_pc - start, map); +++ +++ __ reset_last_Java_frame(true); +++ +++ __ leave(); +++ +++ // check for pending exceptions +++#ifdef ASSERT +++ Label L; +++ __ ld(t0, Address(xthread, Thread::pending_exception_offset())); +++ __ bnez(t0, L); +++ __ should_not_reach_here(); +++ __ bind(L); +++#endif // ASSERT +++ __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry())); +++ +++ +++ // codeBlob framesize is in words (not VMRegImpl::slot_size) +++ RuntimeStub* stub = +++ RuntimeStub::new_runtime_stub(name, +++ &code, +++ frame_complete, +++ (framesize >> (LogBytesPerWord - LogBytesPerInt)), +++ oop_maps, false); +++ assert(stub != NULL, "create runtime stub fail!"); +++ return stub->entry_point(); +++ } +++ +++ // Initialization +++ void generate_initial() { +++ // Generate initial stubs and initializes the entry points +++ +++ // entry points that exist in all platforms Note: This is code +++ // that could be shared among different platforms - however the +++ // benefit seems to be smaller than the disadvantage of having a +++ // much more complicated generator structure. See also comment in +++ // stubRoutines.hpp. +++ +++ StubRoutines::_forward_exception_entry = generate_forward_exception(); +++ +++ StubRoutines::_call_stub_entry = +++ generate_call_stub(StubRoutines::_call_stub_return_address); +++ +++ // is referenced by megamorphic call +++ StubRoutines::_catch_exception_entry = generate_catch_exception(); +++ +++ // Build this early so it's available for the interpreter. +++ StubRoutines::_throw_StackOverflowError_entry = +++ generate_throw_exception("StackOverflowError throw_exception", +++ CAST_FROM_FN_PTR(address, +++ SharedRuntime::throw_StackOverflowError)); +++ StubRoutines::_throw_delayed_StackOverflowError_entry = +++ generate_throw_exception("delayed StackOverflowError throw_exception", +++ CAST_FROM_FN_PTR(address, +++ SharedRuntime::throw_delayed_StackOverflowError)); +++ // Safefetch stubs. 
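+++    // SafeFetch32(int* adr, int errValue) / SafeFetchN load from an address
+++    // that may be unmapped: a fault inside the stub is intercepted by the
+++    // signal handler, which resumes at the recorded continuation pc, so the
+++    // caller gets errValue back instead of a crash, e.g.
+++    //   int v = SafeFetch32(p, -1); // yields -1 if p is unreadable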
+++ generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, +++ &StubRoutines::_safefetch32_fault_pc, +++ &StubRoutines::_safefetch32_continuation_pc); +++ generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, +++ &StubRoutines::_safefetchN_fault_pc, +++ &StubRoutines::_safefetchN_continuation_pc); +++ } +++ +++ void generate_all() { +++ // support for verify_oop (must happen after universe_init) +++ StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); +++ StubRoutines::_throw_AbstractMethodError_entry = +++ generate_throw_exception("AbstractMethodError throw_exception", +++ CAST_FROM_FN_PTR(address, +++ SharedRuntime:: +++ throw_AbstractMethodError)); +++ +++ StubRoutines::_throw_IncompatibleClassChangeError_entry = +++ generate_throw_exception("IncompatibleClassChangeError throw_exception", +++ CAST_FROM_FN_PTR(address, +++ SharedRuntime:: +++ throw_IncompatibleClassChangeError)); +++ +++ StubRoutines::_throw_NullPointerException_at_call_entry = +++ generate_throw_exception("NullPointerException at call throw_exception", +++ CAST_FROM_FN_PTR(address, +++ SharedRuntime:: +++ throw_NullPointerException_at_call)); +++ // arraycopy stubs used by compilers +++ generate_arraycopy_stubs(); +++ +++#ifdef COMPILER2 +++ if (UseMulAddIntrinsic) { +++ StubRoutines::_mulAdd = generate_mulAdd(); +++ } +++ +++ if (UseMultiplyToLenIntrinsic) { +++ StubRoutines::_multiplyToLen = generate_multiplyToLen(); +++ } +++ +++ if (UseSquareToLenIntrinsic) { +++ StubRoutines::_squareToLen = generate_squareToLen(); +++ } +++ +++ if (UseMontgomeryMultiplyIntrinsic) { +++ StubCodeMark mark(this, "StubRoutines", "montgomeryMultiply"); +++ MontgomeryMultiplyGenerator g(_masm, /*squaring*/false); +++ StubRoutines::_montgomeryMultiply = g.generate_multiply(); +++ } +++ +++ if (UseMontgomerySquareIntrinsic) { +++ StubCodeMark mark(this, "StubRoutines", "montgomerySquare"); +++ MontgomeryMultiplyGenerator g(_masm, /*squaring*/true); +++ StubRoutines::_montgomerySquare = g.generate_square(); +++ } +++ +++ if (UseRVVForBigIntegerShiftIntrinsics) { +++ StubRoutines::_bigIntegerLeftShiftWorker = generate_bigIntegerLeftShift(); +++ StubRoutines::_bigIntegerRightShiftWorker = generate_bigIntegerRightShift(); +++ } +++#endif +++ +++ generate_compare_long_strings(); +++ +++ generate_string_indexof_stubs(); +++ +++ BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); +++ if (bs_nm != NULL) { +++ StubRoutines::riscv::_method_entry_barrier = generate_method_entry_barrier(); +++ } +++ +++ StubRoutines::riscv::set_completed(); +++ } +++ +++ public: +++ StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { +++ if (all) { +++ generate_all(); +++ } else { +++ generate_initial(); +++ } +++ } +++ +++ ~StubGenerator() {} +++}; // end class declaration +++ +++#define UCM_TABLE_MAX_ENTRIES 8 +++void StubGenerator_generate(CodeBuffer* code, bool all) { +++ if (UnsafeCopyMemory::_table == NULL) { +++ UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES); +++ } +++ +++ StubGenerator g(code, all); +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/stubRoutines_riscv.cpp ++@@ -0,0 +1,58 @@ +++/* +++ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "runtime/deoptimization.hpp" +++#include "runtime/frame.inline.hpp" +++#include "runtime/stubRoutines.hpp" +++#include "runtime/thread.inline.hpp" +++#include "utilities/globalDefinitions.hpp" +++ +++// Implementation of the platform-specific part of StubRoutines - for +++// a description of how to extend it, see the stubRoutines.hpp file. +++ +++address StubRoutines::riscv::_get_previous_sp_entry = NULL; +++ +++address StubRoutines::riscv::_f2i_fixup = NULL; +++address StubRoutines::riscv::_f2l_fixup = NULL; +++address StubRoutines::riscv::_d2i_fixup = NULL; +++address StubRoutines::riscv::_d2l_fixup = NULL; +++address StubRoutines::riscv::_float_sign_mask = NULL; +++address StubRoutines::riscv::_float_sign_flip = NULL; +++address StubRoutines::riscv::_double_sign_mask = NULL; +++address StubRoutines::riscv::_double_sign_flip = NULL; +++address StubRoutines::riscv::_zero_blocks = NULL; +++address StubRoutines::riscv::_compare_long_string_LL = NULL; +++address StubRoutines::riscv::_compare_long_string_UU = NULL; +++address StubRoutines::riscv::_compare_long_string_LU = NULL; +++address StubRoutines::riscv::_compare_long_string_UL = NULL; +++address StubRoutines::riscv::_string_indexof_linear_ll = NULL; +++address StubRoutines::riscv::_string_indexof_linear_uu = NULL; +++address StubRoutines::riscv::_string_indexof_linear_ul = NULL; +++address StubRoutines::riscv::_large_byte_array_inflate = NULL; +++address StubRoutines::riscv::_method_entry_barrier = NULL; +++ +++bool StubRoutines::riscv::_completed = false; ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp ++@@ -0,0 +1,161 @@ +++/* +++ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). 
+++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_STUBROUTINES_RISCV_HPP +++#define CPU_RISCV_STUBROUTINES_RISCV_HPP +++ +++// This file holds the platform specific parts of the StubRoutines +++// definition. See stubRoutines.hpp for a description on how to +++// extend it. +++ +++static bool returns_to_call_stub(address return_pc) { +++ return return_pc == _call_stub_return_address; +++} +++ +++enum platform_dependent_constants { +++ code_size1 = 19000, // simply increase if too small (assembler will crash if too small) +++ code_size2 = 28000 // simply increase if too small (assembler will crash if too small) +++}; +++ +++class riscv { +++ friend class StubGenerator; +++ +++ private: +++ static address _get_previous_sp_entry; +++ +++ static address _f2i_fixup; +++ static address _f2l_fixup; +++ static address _d2i_fixup; +++ static address _d2l_fixup; +++ +++ static address _float_sign_mask; +++ static address _float_sign_flip; +++ static address _double_sign_mask; +++ static address _double_sign_flip; +++ +++ static address _zero_blocks; +++ +++ static address _compare_long_string_LL; +++ static address _compare_long_string_LU; +++ static address _compare_long_string_UL; +++ static address _compare_long_string_UU; +++ static address _string_indexof_linear_ll; +++ static address _string_indexof_linear_uu; +++ static address _string_indexof_linear_ul; +++ static address _large_byte_array_inflate; +++ +++ static address _method_entry_barrier; +++ +++ static bool _completed; +++ +++ public: +++ +++ static address get_previous_sp_entry() { +++ return _get_previous_sp_entry; +++ } +++ +++ static address f2i_fixup() { +++ return _f2i_fixup; +++ } +++ +++ static address f2l_fixup() { +++ return _f2l_fixup; +++ } +++ +++ static address d2i_fixup() { +++ return _d2i_fixup; +++ } +++ +++ static address d2l_fixup() { +++ return _d2l_fixup; +++ } +++ +++ static address float_sign_mask() { +++ return _float_sign_mask; +++ } +++ +++ static address float_sign_flip() { +++ return _float_sign_flip; +++ } +++ +++ static address double_sign_mask() { +++ return _double_sign_mask; +++ } +++ +++ static address double_sign_flip() { +++ return _double_sign_flip; +++ } +++ +++ static address zero_blocks() { +++ return _zero_blocks; +++ } +++ +++ static address compare_long_string_LL() { +++ return _compare_long_string_LL; +++ } +++ +++ static address compare_long_string_LU() { +++ return _compare_long_string_LU; +++ } +++ +++ static address compare_long_string_UL() { +++ return _compare_long_string_UL; +++ } +++ +++ static address compare_long_string_UU() { +++ return _compare_long_string_UU; +++ } +++ +++ static address string_indexof_linear_ul() { +++ return _string_indexof_linear_ul; +++ } +++ +++ static address string_indexof_linear_ll() { +++ return _string_indexof_linear_ll; +++ } +++ +++ static address string_indexof_linear_uu() { +++ return _string_indexof_linear_uu; +++ } +++ +++ static address large_byte_array_inflate() { +++ return _large_byte_array_inflate; +++ } +++ +++ static address method_entry_barrier() { +++ return _method_entry_barrier; +++ } +++ +++ static bool complete() { +++ return _completed; +++ } 
+++
+++  static void set_completed() {
+++    _completed = true;
+++  }
+++};
+++
+++#endif // CPU_RISCV_STUBROUTINES_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
++@@ -0,0 +1,1760 @@
+++/*
+++ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#include "precompiled.hpp"
+++#include "asm/macroAssembler.inline.hpp"
+++#include "classfile/javaClasses.hpp"
+++#include "gc/shared/barrierSetAssembler.hpp"
+++#include "interpreter/bytecodeHistogram.hpp"
+++#include "interpreter/bytecodeTracer.hpp"
+++#include "interpreter/interp_masm.hpp"
+++#include "interpreter/interpreter.hpp"
+++#include "interpreter/interpreterRuntime.hpp"
+++#include "interpreter/templateInterpreterGenerator.hpp"
+++#include "interpreter/templateTable.hpp"
+++#include "memory/resourceArea.hpp"
+++#include "oops/arrayOop.hpp"
+++#include "oops/method.hpp"
+++#include "oops/methodData.hpp"
+++#include "oops/oop.inline.hpp"
+++#include "prims/jvmtiExport.hpp"
+++#include "prims/jvmtiThreadState.hpp"
+++#include "runtime/arguments.hpp"
+++#include "runtime/deoptimization.hpp"
+++#include "runtime/frame.inline.hpp"
+++#include "runtime/jniHandles.hpp"
+++#include "runtime/sharedRuntime.hpp"
+++#include "runtime/stubRoutines.hpp"
+++#include "runtime/synchronizer.hpp"
+++#include "runtime/timer.hpp"
+++#include "runtime/vframeArray.hpp"
+++#include "utilities/debug.hpp"
+++#include "utilities/powerOfTwo.hpp"
+++#include <sys/types.h>
+++
+++#ifndef PRODUCT
+++#include "oops/method.hpp"
+++#endif // !PRODUCT
+++
+++// Size of interpreter code. Increase if too small. Interpreter will
+++// fail with a guarantee ("not enough space for interpreter generation")
+++// if too small.
+++// Run with +PrintInterpreter to get the VM to print out the size.
+++// Max size with JVMTI +++int TemplateInterpreter::InterpreterCodeSize = 256 * 1024; +++ +++#define __ _masm-> +++ +++//----------------------------------------------------------------------------- +++ +++address TemplateInterpreterGenerator::generate_slow_signature_handler() { +++ address entry = __ pc(); +++ +++ __ andi(esp, esp, -16); +++ __ mv(c_rarg3, esp); +++ // xmethod +++ // xlocals +++ // c_rarg3: first stack arg - wordSize +++ // adjust sp +++ +++ __ addi(sp, c_rarg3, -18 * wordSize); +++ __ addi(sp, sp, -2 * wordSize); +++ __ sd(ra, Address(sp, 0)); +++ +++ __ call_VM(noreg, +++ CAST_FROM_FN_PTR(address, +++ InterpreterRuntime::slow_signature_handler), +++ xmethod, xlocals, c_rarg3); +++ +++ // x10: result handler +++ +++ // Stack layout: +++ // sp: return address <- sp +++ // 1 garbage +++ // 8 integer args (if static first is unused) +++ // 1 float/double identifiers +++ // 8 double args +++ // stack args <- esp +++ // garbage +++ // expression stack bottom +++ // bcp (NULL) +++ // ... +++ +++ // Restore ra +++ __ ld(ra, Address(sp, 0)); +++ __ addi(sp, sp , 2 * wordSize); +++ +++ // Do FP first so we can use c_rarg3 as temp +++ __ lwu(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers +++ +++ for (int i = 0; i < Argument::n_float_register_parameters_c; i++) { +++ const FloatRegister r = g_FPArgReg[i]; +++ Label d, done; +++ +++ __ test_bit(t0, c_rarg3, i); +++ __ bnez(t0, d); +++ __ flw(r, Address(sp, (10 + i) * wordSize)); +++ __ j(done); +++ __ bind(d); +++ __ fld(r, Address(sp, (10 + i) * wordSize)); +++ __ bind(done); +++ } +++ +++ // c_rarg0 contains the result from the call of +++ // InterpreterRuntime::slow_signature_handler so we don't touch it +++ // here. It will be loaded with the JNIEnv* later. +++ for (int i = 1; i < Argument::n_int_register_parameters_c; i++) { +++ const Register rm = g_INTArgReg[i]; +++ __ ld(rm, Address(sp, i * wordSize)); +++ } +++ +++ __ addi(sp, sp, 18 * wordSize); +++ __ ret(); +++ +++ return entry; +++} +++ +++// Various method entries +++address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) { +++ // xmethod: Method* +++ // x30: sender sp +++ // esp: args +++ +++ if (!InlineIntrinsics) { +++ return NULL; // Generate a vanilla entry +++ } +++ +++ // These don't need a safepoint check because they aren't virtually +++ // callable. We won't enter these intrinsics from compiled code. +++ // If in the future we added an intrinsic which was virtually callable +++ // we'd have to worry about how to safepoint so that this code is used. 
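+++
+++  // Schematically, each math entry generated below does (a sketch):
+++  //   entry:  f10 = *(double*)esp;  // argument from the expression stack
+++  //           sp  = x30;            // restore caller's SP
+++  //           f10 = op(f10);        // inline insn, or stub/runtime call
+++  //           jr  continuation;     // ra, or x9 when op involved a call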
+++ +++ // mathematical functions inlined by compiler +++ // (interpreter must provide identical implementation +++ // in order to avoid monotonicity bugs when switching +++ // from interpreter to compiler in the middle of some +++ // computation) +++ // +++ // stack: +++ // [ arg ] <-- esp +++ // [ arg ] +++ // retaddr in ra +++ +++ address fn = NULL; +++ address entry_point = NULL; +++ Register continuation = ra; +++ switch (kind) { +++ case Interpreter::java_lang_math_abs: +++ entry_point = __ pc(); +++ __ fld(f10, Address(esp)); +++ __ fabs_d(f10, f10); +++ __ mv(sp, x30); // Restore caller's SP +++ break; +++ case Interpreter::java_lang_math_sqrt: +++ entry_point = __ pc(); +++ __ fld(f10, Address(esp)); +++ __ fsqrt_d(f10, f10); +++ __ mv(sp, x30); +++ break; +++ case Interpreter::java_lang_math_sin : +++ entry_point = __ pc(); +++ __ fld(f10, Address(esp)); +++ __ mv(sp, x30); +++ __ mv(x9, ra); +++ continuation = x9; // The first callee-saved register +++ if (StubRoutines::dsin() == NULL) { +++ fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin); +++ } else { +++ fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin()); +++ } +++ __ call(fn); +++ break; +++ case Interpreter::java_lang_math_cos : +++ entry_point = __ pc(); +++ __ fld(f10, Address(esp)); +++ __ mv(sp, x30); +++ __ mv(x9, ra); +++ continuation = x9; // The first callee-saved register +++ if (StubRoutines::dcos() == NULL) { +++ fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos); +++ } else { +++ fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos()); +++ } +++ __ call(fn); +++ break; +++ case Interpreter::java_lang_math_tan : +++ entry_point = __ pc(); +++ __ fld(f10, Address(esp)); +++ __ mv(sp, x30); +++ __ mv(x9, ra); +++ continuation = x9; // The first callee-saved register +++ if (StubRoutines::dtan() == NULL) { +++ fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan); +++ } else { +++ fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan()); +++ } +++ __ call(fn); +++ break; +++ case Interpreter::java_lang_math_log : +++ entry_point = __ pc(); +++ __ fld(f10, Address(esp)); +++ __ mv(sp, x30); +++ __ mv(x9, ra); +++ continuation = x9; // The first callee-saved register +++ if (StubRoutines::dlog() == NULL) { +++ fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog); +++ } else { +++ fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog()); +++ } +++ __ call(fn); +++ break; +++ case Interpreter::java_lang_math_log10 : +++ entry_point = __ pc(); +++ __ fld(f10, Address(esp)); +++ __ mv(sp, x30); +++ __ mv(x9, ra); +++ continuation = x9; // The first callee-saved register +++ if (StubRoutines::dlog10() == NULL) { +++ fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10); +++ } else { +++ fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10()); +++ } +++ __ call(fn); +++ break; +++ case Interpreter::java_lang_math_exp : +++ entry_point = __ pc(); +++ __ fld(f10, Address(esp)); +++ __ mv(sp, x30); +++ __ mv(x9, ra); +++ continuation = x9; // The first callee-saved register +++ if (StubRoutines::dexp() == NULL) { +++ fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp); +++ } else { +++ fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp()); +++ } +++ __ call(fn); +++ break; +++ case Interpreter::java_lang_math_pow : +++ entry_point = __ pc(); +++ __ mv(x9, ra); +++ continuation = x9; +++ __ fld(f10, Address(esp, 2 * Interpreter::stackElementSize)); +++ __ fld(f11, Address(esp)); +++ __ mv(sp, x30); +++ if (StubRoutines::dpow() == NULL) { +++ fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow); +++ } else { +++ fn = 
CAST_FROM_FN_PTR(address, StubRoutines::dpow()); +++ } +++ __ call(fn); +++ break; +++ case Interpreter::java_lang_math_fmaD : +++ if (UseFMA) { +++ entry_point = __ pc(); +++ __ fld(f10, Address(esp, 4 * Interpreter::stackElementSize)); +++ __ fld(f11, Address(esp, 2 * Interpreter::stackElementSize)); +++ __ fld(f12, Address(esp)); +++ __ fmadd_d(f10, f10, f11, f12); +++ __ mv(sp, x30); // Restore caller's SP +++ } +++ break; +++ case Interpreter::java_lang_math_fmaF : +++ if (UseFMA) { +++ entry_point = __ pc(); +++ __ flw(f10, Address(esp, 2 * Interpreter::stackElementSize)); +++ __ flw(f11, Address(esp, Interpreter::stackElementSize)); +++ __ flw(f12, Address(esp)); +++ __ fmadd_s(f10, f10, f11, f12); +++ __ mv(sp, x30); // Restore caller's SP +++ } +++ break; +++ default: +++ ; +++ } +++ if (entry_point != NULL) { +++ __ jr(continuation); +++ } +++ +++ return entry_point; +++} +++ +++// Abstract method entry +++// Attempt to execute abstract method. Throw exception +++address TemplateInterpreterGenerator::generate_abstract_entry(void) { +++ // xmethod: Method* +++ // x30: sender SP +++ +++ address entry_point = __ pc(); +++ +++ // abstract method entry +++ +++ // pop return address, reset last_sp to NULL +++ __ empty_expression_stack(); +++ __ restore_bcp(); // bcp must be correct for exception handler (was destroyed) +++ __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) +++ +++ // throw exception +++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, +++ InterpreterRuntime::throw_AbstractMethodErrorWithMethod), +++ xmethod); +++ // the call_VM checks for exception, so we should never return here. +++ __ should_not_reach_here(); +++ +++ return entry_point; +++} +++ +++address TemplateInterpreterGenerator::generate_StackOverflowError_handler() { +++ address entry = __ pc(); +++ +++#ifdef ASSERT +++ { +++ Label L; +++ __ ld(t0, Address(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize)); +++ __ mv(t1, sp); +++ // maximal sp for current fp (stack grows negative) +++ // check if frame is complete +++ __ bge(t0, t1, L); +++ __ stop ("interpreter frame not set up"); +++ __ bind(L); +++ } +++#endif // ASSERT +++ // Restore bcp under the assumption that the current frame is still +++ // interpreted +++ __ restore_bcp(); +++ +++ // expression stack must be empty before entering the VM if an +++ // exception happened +++ __ empty_expression_stack(); +++ // throw exception +++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError)); +++ return entry; +++} +++ +++address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() { +++ address entry = __ pc(); +++ // expression stack must be empty before entering the VM if an +++ // exception happened +++ __ empty_expression_stack(); +++ // setup parameters +++ +++ // convention: expect aberrant index in register x11 +++ __ zero_extend(c_rarg2, x11, 32); +++ // convention: expect array in register x13 +++ __ mv(c_rarg1, x13); +++ __ call_VM(noreg, +++ CAST_FROM_FN_PTR(address, +++ InterpreterRuntime:: +++ throw_ArrayIndexOutOfBoundsException), +++ c_rarg1, c_rarg2); +++ return entry; +++} +++ +++address TemplateInterpreterGenerator::generate_ClassCastException_handler() { +++ address entry = __ pc(); +++ +++ // object is at TOS +++ __ pop_reg(c_rarg1); +++ +++ // expression stack must be empty before entering the VM if an +++ // exception happened +++ __ empty_expression_stack(); +++ +++ __ call_VM(noreg, +++ CAST_FROM_FN_PTR(address, +++ InterpreterRuntime:: 
+++ throw_ClassCastException), +++ c_rarg1); +++ return entry; +++} +++ +++address TemplateInterpreterGenerator::generate_exception_handler_common( +++ const char* name, const char* message, bool pass_oop) { +++ assert(!pass_oop || message == NULL, "either oop or message but not both"); +++ address entry = __ pc(); +++ if (pass_oop) { +++ // object is at TOS +++ __ pop_reg(c_rarg2); +++ } +++ // expression stack must be empty before entering the VM if an +++ // exception happened +++ __ empty_expression_stack(); +++ // setup parameters +++ __ la(c_rarg1, Address((address)name)); +++ if (pass_oop) { +++ __ call_VM(x10, CAST_FROM_FN_PTR(address, +++ InterpreterRuntime:: +++ create_klass_exception), +++ c_rarg1, c_rarg2); +++ } else { +++ // kind of lame ExternalAddress can't take NULL because +++ // external_word_Relocation will assert. +++ if (message != NULL) { +++ __ la(c_rarg2, Address((address)message)); +++ } else { +++ __ mv(c_rarg2, NULL_WORD); +++ } +++ __ call_VM(x10, +++ CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), +++ c_rarg1, c_rarg2); +++ } +++ // throw exception +++ __ j(address(Interpreter::throw_exception_entry())); +++ return entry; +++} +++ +++address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { +++ address entry = __ pc(); +++ +++ // Restore stack bottom in case i2c adjusted stack +++ __ ld(esp, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize)); +++ // and NULL it as marker that esp is now tos until next java call +++ __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize)); +++ __ restore_bcp(); +++ __ restore_locals(); +++ __ restore_constant_pool_cache(); +++ __ get_method(xmethod); +++ +++ if (state == atos) { +++ Register obj = x10; +++ Register mdp = x11; +++ Register tmp = x12; +++ __ ld(mdp, Address(xmethod, Method::method_data_offset())); +++ __ profile_return_type(mdp, obj, tmp); +++ } +++ +++ // Pop N words from the stack +++ __ get_cache_and_index_at_bcp(x11, x12, 1, index_size); +++ __ ld(x11, Address(x11, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); +++ __ andi(x11, x11, ConstantPoolCacheEntry::parameter_size_mask); +++ +++ __ shadd(esp, x11, esp, t0, 3); +++ +++ // Restore machine SP +++ __ ld(t0, Address(xmethod, Method::const_offset())); +++ __ lhu(t0, Address(t0, ConstMethod::max_stack_offset())); +++ __ addi(t0, t0, frame::interpreter_frame_monitor_size() + 2); +++ __ ld(t1, +++ Address(fp, frame::interpreter_frame_initial_sp_offset * wordSize)); +++ __ slli(t0, t0, 3); +++ __ sub(t0, t1, t0); +++ __ andi(sp, t0, -16); +++ +++ __ check_and_handle_popframe(xthread); +++ __ check_and_handle_earlyret(xthread); +++ +++ __ get_dispatch(); +++ __ dispatch_next(state, step); +++ +++ return entry; +++} +++ +++address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, +++ int step, +++ address continuation) { +++ address entry = __ pc(); +++ __ restore_bcp(); +++ __ restore_locals(); +++ __ restore_constant_pool_cache(); +++ __ get_method(xmethod); +++ __ get_dispatch(); +++ +++ // Calculate stack limit +++ __ ld(t0, Address(xmethod, Method::const_offset())); +++ __ lhu(t0, Address(t0, ConstMethod::max_stack_offset())); +++ __ addi(t0, t0, frame::interpreter_frame_monitor_size() + 2); +++ __ ld(t1, Address(fp, frame::interpreter_frame_initial_sp_offset * wordSize)); +++ __ slli(t0, t0, 3); +++ __ sub(t0, t1, t0); +++ __ andi(sp, t0, -16); +++ +++ // Restore expression stack pointer +++ __ ld(esp, 
Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
+++  // NULL last_sp until next java call
+++  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
+++
+++  // handle exceptions
+++  {
+++    Label L;
+++    __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
+++    __ beqz(t0, L);
+++    __ call_VM(noreg,
+++               CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
+++    __ should_not_reach_here();
+++    __ bind(L);
+++  }
+++
+++  if (continuation == NULL) {
+++    __ dispatch_next(state, step);
+++  } else {
+++    __ jump_to_entry(continuation);
+++  }
+++  return entry;
+++}
+++
+++address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
+++  address entry = __ pc();
+++  if (type == T_OBJECT) {
+++    // retrieve result from frame
+++    __ ld(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
+++    // and verify it
+++    __ verify_oop(x10);
+++  } else {
+++    __ cast_primitive_type(type, x10);
+++  }
+++
+++  __ ret(); // return from result handler
+++  return entry;
+++}
+++
+++address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
+++                                                                address runtime_entry) {
+++  assert_cond(runtime_entry != NULL);
+++  address entry = __ pc();
+++  __ push(state);
+++  __ call_VM(noreg, runtime_entry);
+++  __ membar(MacroAssembler::AnyAny);
+++  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
+++  return entry;
+++}
+++
+++// Helpers for commoning out cases in the various types of method entries.
+++//
+++
+++
+++// increment invocation count & check for overflow
+++//
+++// Note: checking for negative value instead of overflow
+++// so we have a 'sticky' overflow test
+++//
+++// xmethod: method
+++//
+++void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
+++  Label done;
+++  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
+++  int increment = InvocationCounter::count_increment;
+++  Label no_mdo;
+++  if (ProfileInterpreter) {
+++    // Are we profiling?
+++    __ ld(x10, Address(xmethod, Method::method_data_offset()));
+++    __ beqz(x10, no_mdo);
+++    // Increment counter in the MDO
+++    const Address mdo_invocation_counter(x10, in_bytes(MethodData::invocation_counter_offset()) +
+++                                              in_bytes(InvocationCounter::counter_offset()));
+++    const Address mask(x10, in_bytes(MethodData::invoke_mask_offset()));
+++    __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, t0, t1, false, overflow);
+++    __ j(done);
+++  }
+++  __ bind(no_mdo);
+++  // Increment counter in MethodCounters
+++  const Address invocation_counter(t1,
+++                                   MethodCounters::invocation_counter_offset() +
+++                                   InvocationCounter::counter_offset());
+++  __ get_method_counters(xmethod, t1, done);
+++  const Address mask(t1, in_bytes(MethodCounters::invoke_mask_offset()));
+++  __ increment_mask_and_jump(invocation_counter, increment, mask, t0, x11, false, overflow);
+++  __ bind(done);
+++}
+++
+++void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
+++  __ mv(c_rarg1, zr);
+++  __ call_VM(noreg,
+++             CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), c_rarg1);
+++  __ j(do_continue);
+++}
+++
+++// See if we've got enough room on the stack for locals plus overhead
+++// below JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
+++// without going through the signal handler, i.e., reserved and yellow zones
+++// will not be made usable. The shadow zone must suffice to handle the
+++// overflow.
+++// The expression stack grows down incrementally, so the normal guard
+++// page mechanism will work for that.
+++//
+++// NOTE: Since the additional locals are also always pushed (this wasn't
+++// obvious in generate_method_entry), the guard should work for them too.
+++//
+++// Args:
+++//   x13: number of additional locals this frame needs (what we must check)
+++//   xmethod: Method*
+++//
+++// Kills:
+++//   x10
+++void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
+++
+++  // monitor entry size: see picture of stack set
+++  // (generate_method_entry) and frame_amd64.hpp
+++  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+++
+++  // total overhead size: entry_size + (saved fp through expr stack
+++  // bottom). Be sure to change this if you add/subtract anything
+++  // to/from the overhead area
+++  const int overhead_size =
+++    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
+++
+++  const int page_size = os::vm_page_size();
+++
+++  Label after_frame_check;
+++
+++  // See if the frame is greater than one page in size. If so,
+++  // then we need to verify there is enough stack space remaining
+++  // for the additional locals.
+++  __ mv(t0, (page_size - overhead_size) / Interpreter::stackElementSize);
+++  __ bleu(x13, t0, after_frame_check);
+++
+++  // compute sp as if this were going to be the last frame on
+++  // the stack before the red zone
+++
+++  // locals + overhead, in bytes
+++  __ mv(x10, overhead_size);
+++  __ shadd(x10, x13, x10, t0, Interpreter::logStackElementSize); // 2 slots per parameter.
+++
+++  const Address stack_limit(xthread, JavaThread::stack_overflow_limit_offset());
+++  __ ld(t0, stack_limit);
+++
+++#ifdef ASSERT
+++  Label limit_okay;
+++  // Verify that thread stack limit is non-zero.
+++  __ bnez(t0, limit_okay);
+++  __ stop("stack overflow limit is zero");
+++  __ bind(limit_okay);
+++#endif
+++
+++  // Add stack limit to locals.
+++  __ add(x10, x10, t0);
+++
+++  // Check against the current stack bottom.
+++  __ bgtu(sp, x10, after_frame_check);
+++
+++  // Remove the incoming args, peeling the machine SP back to where it
+++  // was in the caller. This is not strictly necessary, but unless we
+++  // do so the stack frame may have a garbage FP; this ensures a
+++  // correct call stack that we can always unwind. The ANDI should be
+++  // unnecessary because the sender SP in x30 is always aligned, but
+++  // it doesn't hurt.
+++  __ andi(sp, x30, -16);
+++
+++  // Note: the restored frame is not necessarily interpreted.
+++  // Use the shared runtime version of the StackOverflowError.
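+++  // To recap, the check above amounts to (a sketch, sizes in bytes):
+++  //   if (locals + overhead <= page_size) fall through;  // guard page catches it
+++  //   else if (sp > stack_limit + locals + overhead) fall through;
+++  //   else unwind the args and jump to the shared throw stub below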
+++ assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated"); +++ __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry())); +++ +++ // all done with frame size check +++ __ bind(after_frame_check); +++} +++ +++// Allocate monitor and lock method (asm interpreter) +++// +++// Args: +++// xmethod: Method* +++// xlocals: locals +++// +++// Kills: +++// x10 +++// c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs) +++// t0, t1 (temporary regs) +++void TemplateInterpreterGenerator::lock_method() { +++ // synchronize method +++ const Address access_flags(xmethod, Method::access_flags_offset()); +++ const Address monitor_block_top(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize); +++ const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; +++ +++#ifdef ASSERT +++ __ lwu(x10, access_flags); +++ __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method doesn't need synchronization", false); +++#endif // ASSERT +++ +++ // get synchronization object +++ { +++ Label done; +++ __ lwu(x10, access_flags); +++ __ andi(t0, x10, JVM_ACC_STATIC); +++ // get receiver (assume this is frequent case) +++ __ ld(x10, Address(xlocals, Interpreter::local_offset_in_bytes(0))); +++ __ beqz(t0, done); +++ __ load_mirror(x10, xmethod); +++ +++#ifdef ASSERT +++ { +++ Label L; +++ __ bnez(x10, L); +++ __ stop("synchronization object is NULL"); +++ __ bind(L); +++ } +++#endif // ASSERT +++ +++ __ bind(done); +++ } +++ +++ // add space for monitor & lock +++ __ add(sp, sp, - entry_size); // add space for a monitor entry +++ __ add(esp, esp, - entry_size); +++ __ mv(t0, esp); +++ __ sd(t0, monitor_block_top); // set new monitor block top +++ // store object +++ __ sd(x10, Address(esp, BasicObjectLock::obj_offset_in_bytes())); +++ __ mv(c_rarg1, esp); // object address +++ __ lock_object(c_rarg1); +++} +++ +++// Generate a fixed interpreter frame. This is identical setup for +++// interpreted methods and for native methods hence the shared code. +++// +++// Args: +++// ra: return address +++// xmethod: Method* +++// xlocals: pointer to locals +++// xcpool: cp cache +++// stack_pointer: previous sp +++void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { +++ // initialize fixed part of activation frame +++ if (native_call) { +++ __ add(esp, sp, - 14 * wordSize); +++ __ mv(xbcp, zr); +++ __ add(sp, sp, - 14 * wordSize); +++ // add 2 zero-initialized slots for native calls +++ __ sd(zr, Address(sp, 13 * wordSize)); +++ __ sd(zr, Address(sp, 12 * wordSize)); +++ } else { +++ __ add(esp, sp, - 12 * wordSize); +++ __ ld(t0, Address(xmethod, Method::const_offset())); // get ConstMethod +++ __ add(xbcp, t0, in_bytes(ConstMethod::codes_offset())); // get codebase +++ __ add(sp, sp, - 12 * wordSize); +++ } +++ __ sd(xbcp, Address(sp, wordSize)); +++ __ sd(esp, Address(sp, 0)); +++ +++ if (ProfileInterpreter) { +++ Label method_data_continue; +++ __ ld(t0, Address(xmethod, Method::method_data_offset())); +++ __ beqz(t0, method_data_continue); +++ __ la(t0, Address(t0, in_bytes(MethodData::data_offset()))); +++ __ bind(method_data_continue); +++ } +++ +++ __ sd(xmethod, Address(sp, 7 * wordSize)); +++ __ sd(ProfileInterpreter ? 
t0 : zr, Address(sp, 6 * wordSize));
+++
+++  // Get mirror and store it in the frame as GC root for this Method*
+++  __ load_mirror(t2, xmethod);
+++  __ sd(zr, Address(sp, 5 * wordSize));
+++  __ sd(t2, Address(sp, 4 * wordSize));
+++
+++  __ ld(xcpool, Address(xmethod, Method::const_offset()));
+++  __ ld(xcpool, Address(xcpool, ConstMethod::constants_offset()));
+++  __ ld(xcpool, Address(xcpool, ConstantPool::cache_offset_in_bytes()));
+++  __ sd(xcpool, Address(sp, 3 * wordSize));
+++  __ sd(xlocals, Address(sp, 2 * wordSize));
+++
+++  __ sd(ra, Address(sp, 11 * wordSize));
+++  __ sd(fp, Address(sp, 10 * wordSize));
+++  __ la(fp, Address(sp, 12 * wordSize)); // include ra & fp
+++
+++  // set sender sp
+++  // leave last_sp as null
+++  __ sd(x30, Address(sp, 9 * wordSize));
+++  __ sd(zr, Address(sp, 8 * wordSize));
+++
+++  // Move SP out of the way
+++  if (!native_call) {
+++    __ ld(t0, Address(xmethod, Method::const_offset()));
+++    __ lhu(t0, Address(t0, ConstMethod::max_stack_offset()));
+++    __ add(t0, t0, frame::interpreter_frame_monitor_size() + 2);
+++    __ slli(t0, t0, 3);
+++    __ sub(t0, sp, t0);
+++    __ andi(sp, t0, -16);
+++  }
+++}
+++
+++// End of helpers
+++
+++// Various method entries
+++//------------------------------------------------------------------------------------------------------------------------
+++//
+++//
+++
+++// Method entry for java.lang.ref.Reference.get.
+++address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
+++  // Code: _aload_0, _getfield, _areturn
+++  // parameter size = 1
+++  //
+++  // The code that gets generated by this routine is split into 2 parts:
+++  //    1. The "intrinsified" code for G1 (or any SATB based GC),
+++  //    2. The slow path - which is an expansion of the regular method entry.
+++  //
+++  // Notes:
+++  // * In the G1 code we do not check whether we need to block for
+++  //   a safepoint. If G1 is enabled then we must execute the specialized
+++  //   code for Reference.get (except when the Reference object is null)
+++  //   so that we can log the value in the referent field with an SATB
+++  //   update buffer.
+++  //   If the code for the getfield template is modified so that the
+++  //   G1 pre-barrier code is executed when the current method is
+++  //   Reference.get() then going through the normal method entry
+++  //   will be fine.
+++  // * The G1 code can, however, check the receiver object (the instance
+++  //   of java.lang.Reference) and jump to the slow path if null. If the
+++  //   Reference object is null then we obviously cannot fetch the referent
+++  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
+++  //   regular method entry code to generate the NPE.
+++  //
+++  // This code is based on generate_accessor_entry.
+++  //
+++  // xmethod: Method*
+++  // x30: senderSP must be preserved for slow path, set SP to it on fast path
+++
+++  // ra is live. It must be saved around calls.
+++
+++  address entry = __ pc();
+++
+++  const int referent_offset = java_lang_ref_Reference::referent_offset();
+++  guarantee(referent_offset > 0, "referent offset not initialized");
+++
+++  Label slow_path;
+++  const Register local_0 = c_rarg0;
+++  // Check if local 0 != NULL
+++  // If the receiver is null then it is OK to jump to the slow path.
+++  __ ld(local_0, Address(esp, 0));
+++  __ beqz(local_0, slow_path);
+++
+++  __ mv(x9, x30);   // Move senderSP to a callee-saved register
+++
+++  // Load the value of the referent field.
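+++  // (The ON_WEAK_OOP_REF decorator below is what routes this load through
+++  // the GC's weak-reference barrier, e.g. G1's SATB logging noted above.)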
+++ const Address field_address(local_0, referent_offset); +++ BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); +++ bs->load_at(_masm, IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT, local_0, field_address, /*tmp1*/ t1, /*tmp2*/ t0); +++ +++ // areturn +++ __ andi(sp, x9, -16); // done with stack +++ __ ret(); +++ +++ // generate a vanilla interpreter entry as the slow path +++ __ bind(slow_path); +++ __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals)); +++ return entry; +++} +++ +++/** +++ * Method entry for static native methods: +++ * int java.util.zip.CRC32.update(int crc, int b) +++ */ +++address TemplateInterpreterGenerator::generate_CRC32_update_entry() { +++ // TODO: Unimplemented generate_CRC32_update_entry +++ return 0; +++} +++ +++/** +++ * Method entry for static native methods: +++ * int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len) +++ * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len) +++ */ +++address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { +++ // TODO: Unimplemented generate_CRC32_updateBytes_entry +++ return 0; +++} +++ +++/** +++ * Method entry for intrinsic-candidate (non-native) methods: +++ * int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end) +++ * int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end) +++ * Unlike CRC32, CRC32C does not have any methods marked as native +++ * CRC32C also uses an "end" variable instead of the length variable CRC32 uses +++ */ +++address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { +++ // TODO: Unimplemented generate_CRC32C_updateBytes_entry +++ return 0; +++} +++ +++void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) { +++ // Bang each page in the shadow zone. We can't assume it's been done for +++ // an interpreter frame with greater than a page of locals, so each page +++ // needs to be checked. Only true for non-native. +++ const int n_shadow_pages = (int)(StackOverflow::stack_shadow_zone_size() / os::vm_page_size()); +++ const int start_page = native_call ? n_shadow_pages : 1; +++ const int page_size = os::vm_page_size(); +++ for (int pages = start_page; pages <= n_shadow_pages ; pages++) { +++ __ sub(t1, sp, pages * page_size); +++ __ sd(zr, Address(t1)); +++ } +++} +++ +++// Interpreter stub for calling a native method. (asm interpreter) +++// This sets up a somewhat different looking stack for calling the +++// native method than the typical interpreter frame setup. +++address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { +++ // determine code generation flags +++ bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; +++ +++ // x11: Method* +++ // x30: sender sp +++ +++ address entry_point = __ pc(); +++ +++ const Address constMethod (xmethod, Method::const_offset()); +++ const Address access_flags (xmethod, Method::access_flags_offset()); +++ const Address size_of_parameters(x12, ConstMethod:: +++ size_of_parameters_offset()); +++ +++ // get parameter size (always needed) +++ __ ld(x12, constMethod); +++ __ load_unsigned_short(x12, size_of_parameters); +++ +++ // Native calls don't need the stack size check since they have no +++ // expression stack and the arguments are already on the stack and +++ // we only add a handful of words to the stack. 
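+++  // (A sketch of the layout: the caller pushed x12 parameter slots on the
+++  // expression stack, so parameter 0 lives at esp + (x12 - 1) * wordSize;
+++  // that is where xlocals is pointed a few lines below.)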
+++ +++ // xmethod: Method* +++ // x12: size of parameters +++ // x30: sender sp +++ +++ // for natives the size of locals is zero +++ +++ // compute beginning of parameters (xlocals) +++ __ shadd(xlocals, x12, esp, xlocals, 3); +++ __ addi(xlocals, xlocals, -wordSize); +++ +++ // Pull SP back to minimum size: this avoids holes in the stack +++ __ andi(sp, esp, -16); +++ +++ // initialize fixed part of activation frame +++ generate_fixed_frame(true); +++ +++ // make sure method is native & not abstract +++#ifdef ASSERT +++ __ lwu(x10, access_flags); +++ __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute non-native method as native", false); +++ __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter"); +++#endif +++ +++ // Since at this point in the method invocation the exception +++ // handler would try to exit the monitor of synchronized methods +++ // which hasn't been entered yet, we set the thread local variable +++ // _do_not_unlock_if_synchronized to true. The remove_activation +++ // will check this flag. +++ +++ const Address do_not_unlock_if_synchronized(xthread, +++ in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); +++ __ mv(t1, true); +++ __ sb(t1, do_not_unlock_if_synchronized); +++ +++ // increment invocation count & check for overflow +++ Label invocation_counter_overflow; +++ if (inc_counter) { +++ generate_counter_incr(&invocation_counter_overflow); +++ } +++ +++ Label continue_after_compile; +++ __ bind(continue_after_compile); +++ +++ bang_stack_shadow_pages(true); +++ +++ // reset the _do_not_unlock_if_synchronized flag +++ __ sb(zr, do_not_unlock_if_synchronized); +++ +++ // check for synchronized methods +++ // Must happen AFTER invocation_counter check and stack overflow check, +++ // so method is not locked if overflows. +++ if (synchronized) { +++ lock_method(); +++ } else { +++ // no synchronization necessary +++#ifdef ASSERT +++ __ lwu(x10, access_flags); +++ __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization"); +++#endif +++ } +++ +++ // start execution +++#ifdef ASSERT +++ __ verify_frame_setup(); +++#endif +++ +++ // jvmti support +++ __ notify_method_entry(); +++ +++ // work registers +++ const Register t = x18; +++ const Register result_handler = x19; +++ +++ // allocate space for parameters +++ __ ld(t, Address(xmethod, Method::const_offset())); +++ __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset())); +++ +++ __ slli(t, t, Interpreter::logStackElementSize); +++ __ sub(x30, esp, t); +++ __ andi(sp, x30, -16); +++ __ mv(esp, x30); +++ +++ // get signature handler +++ { +++ Label L; +++ __ ld(t, Address(xmethod, Method::signature_handler_offset())); +++ __ bnez(t, L); +++ __ call_VM(noreg, +++ CAST_FROM_FN_PTR(address, +++ InterpreterRuntime::prepare_native_call), +++ xmethod); +++ __ ld(t, Address(xmethod, Method::signature_handler_offset())); +++ __ bind(L); +++ } +++ +++ // call signature handler +++ assert(InterpreterRuntime::SignatureHandlerGenerator::from() == xlocals, +++ "adjust this code"); +++ assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp, +++ "adjust this code"); +++ assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t0, +++ "adjust this code"); +++ +++ // The generated handlers do not touch xmethod (the method). +++ // However, large signatures cannot be cached and are generated +++ // each time here. The slow-path generator can do a GC on return, +++ // so we must reload it after the call. 
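+++  // Call the signature handler through t; its address was loaded above.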
+++ __ jalr(t); +++ __ get_method(xmethod); // slow path can do a GC, reload xmethod +++ +++ +++ // result handler is in x10 +++ // set result handler +++ __ mv(result_handler, x10); +++ // pass mirror handle if static call +++ { +++ Label L; +++ __ lwu(t, Address(xmethod, Method::access_flags_offset())); +++ __ test_bit(t0, t, exact_log2(JVM_ACC_STATIC)); +++ __ beqz(t0, L); +++ // get mirror +++ __ load_mirror(t, xmethod); +++ // copy mirror into activation frame +++ __ sd(t, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize)); +++ // pass handle to mirror +++ __ addi(c_rarg1, fp, frame::interpreter_frame_oop_temp_offset * wordSize); +++ __ bind(L); +++ } +++ +++ // get native function entry point in x28 +++ { +++ Label L; +++ __ ld(x28, Address(xmethod, Method::native_function_offset())); +++ address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); +++ __ mv(t1, unsatisfied); +++ __ ld(t1, Address(t1, 0)); +++ __ bne(x28, t1, L); +++ __ call_VM(noreg, +++ CAST_FROM_FN_PTR(address, +++ InterpreterRuntime::prepare_native_call), +++ xmethod); +++ __ get_method(xmethod); +++ __ ld(x28, Address(xmethod, Method::native_function_offset())); +++ __ bind(L); +++ } +++ +++ // pass JNIEnv +++ __ add(c_rarg0, xthread, in_bytes(JavaThread::jni_environment_offset())); +++ +++ // It is enough that the pc() points into the right code +++ // segment. It does not have to be the correct return pc. +++ Label native_return; +++ __ set_last_Java_frame(esp, fp, native_return, x30); +++ +++ // change thread state +++#ifdef ASSERT +++ { +++ Label L; +++ __ lwu(t, Address(xthread, JavaThread::thread_state_offset())); +++ __ addi(t0, zr, (u1)_thread_in_Java); +++ __ beq(t, t0, L); +++ __ stop("Wrong thread state in native stub"); +++ __ bind(L); +++ } +++#endif +++ +++ // Change state to native +++ __ la(t1, Address(xthread, JavaThread::thread_state_offset())); +++ __ mv(t0, _thread_in_native); +++ __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); +++ __ sw(t0, Address(t1)); +++ +++ // Call the native method. +++ __ jalr(x28); +++ __ bind(native_return); +++ __ get_method(xmethod); +++ // result potentially in x10 or f10 +++ +++ // make room for the pushes we're about to do +++ __ sub(t0, esp, 4 * wordSize); +++ __ andi(sp, t0, -16); +++ +++ // NOTE: The order of these pushes is known to frame::interpreter_frame_result +++ // in order to extract the result of a method call. If the order of these +++ // pushes change or anything else is added to the stack then the code in +++ // interpreter_frame_result must also change. +++ __ push(dtos); +++ __ push(ltos); +++ +++ // change thread state +++ // Force all preceding writes to be observed prior to thread state change +++ __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); +++ +++ __ mv(t0, _thread_in_native_trans); +++ __ sw(t0, Address(xthread, JavaThread::thread_state_offset())); +++ +++ // Force this write out before the read below +++ __ membar(MacroAssembler::AnyAny); +++ +++ // check for safepoint operation in progress and/or pending suspend requests +++ { +++ Label L, Continue; +++ +++ // We need an acquire here to ensure that any subsequent load of the +++ // global SafepointSynchronize::_state flag is ordered after this load +++ // of the thread-local polling word. We don't want this poll to +++ // return false (i.e. not safepointing) and a later poll of the global +++ // SafepointSynchronize::_state spuriously to return true. 
+++ // +++ // This is to avoid a race when we're in a native->Java transition +++ // racing the code which wakes up from a safepoint. +++ __ safepoint_poll(L, true /* at_return */, true /* acquire */, false /* in_nmethod */); +++ __ lwu(t1, Address(xthread, JavaThread::suspend_flags_offset())); +++ __ beqz(t1, Continue); +++ __ bind(L); +++ +++ // Don't use call_VM as it will see a possible pending exception +++ // and forward it and never return here preventing us from +++ // clearing _last_native_pc down below. So we do a runtime call by +++ // hand. +++ // +++ __ mv(c_rarg0, xthread); +++ __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)); +++ __ get_method(xmethod); +++ __ reinit_heapbase(); +++ __ bind(Continue); +++ } +++ +++ // change thread state +++ // Force all preceding writes to be observed prior to thread state change +++ __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); +++ +++ __ mv(t0, _thread_in_Java); +++ __ sw(t0, Address(xthread, JavaThread::thread_state_offset())); +++ +++ // reset_last_Java_frame +++ __ reset_last_Java_frame(true); +++ +++ if (CheckJNICalls) { +++ // clear_pending_jni_exception_check +++ __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset())); +++ } +++ +++ // reset handle block +++ __ ld(t, Address(xthread, JavaThread::active_handles_offset())); +++ __ sd(zr, Address(t, JNIHandleBlock::top_offset_in_bytes())); +++ +++ // If result is an oop unbox and store it in frame where gc will see it +++ // and result handler will pick it up +++ +++ { +++ Label no_oop; +++ __ la(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT))); +++ __ bne(t, result_handler, no_oop); +++ // Unbox oop result, e.g. JNIHandles::resolve result. +++ __ pop(ltos); +++ __ resolve_jobject(x10, xthread, t); +++ __ sd(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize)); +++ // keep stack depth as expected by pushing oop which will eventually be discarded +++ __ push(ltos); +++ __ bind(no_oop); +++ } +++ +++ { +++ Label no_reguard; +++ __ lwu(t0, Address(xthread, in_bytes(JavaThread::stack_guard_state_offset()))); +++ __ addi(t1, zr, (u1)StackOverflow::stack_guard_yellow_reserved_disabled); +++ __ bne(t0, t1, no_reguard); +++ +++ __ push_call_clobbered_registers(); +++ __ mv(c_rarg0, xthread); +++ __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); +++ __ pop_call_clobbered_registers(); +++ __ bind(no_reguard); +++ } +++ +++ // The method register is junk from after the thread_in_native transition +++ // until here. Also can't call_VM until the bcp has been +++ // restored. Need bcp for throwing exception below so get it now. +++ __ get_method(xmethod); +++ +++ // restore bcp to have legal interpreter frame, i.e., bci == 0 <=> +++ // xbcp == code_base() +++ __ ld(xbcp, Address(xmethod, Method::const_offset())); // get ConstMethod* +++ __ add(xbcp, xbcp, in_bytes(ConstMethod::codes_offset())); // get codebase +++ // handle exceptions (exception handling will handle unlocking!) +++ { +++ Label L; +++ __ ld(t0, Address(xthread, Thread::pending_exception_offset())); +++ __ beqz(t0, L); +++ // Note: At some point we may want to unify this with the code +++ // used in call_VM_base(); i.e., we should use the +++ // StubRoutines::forward_exception code. For now this doesn't work +++ // here because the sp is not correctly set at this point. 
+++    __ MacroAssembler::call_VM(noreg,
+++                               CAST_FROM_FN_PTR(address,
+++                               InterpreterRuntime::throw_pending_exception));
+++    __ should_not_reach_here();
+++    __ bind(L);
+++  }
+++
+++  // do unlocking if necessary
+++  {
+++    Label L;
+++    __ lwu(t, Address(xmethod, Method::access_flags_offset()));
+++    __ test_bit(t0, t, exact_log2(JVM_ACC_SYNCHRONIZED));
+++    __ beqz(t0, L);
+++    // the code below should be shared with interpreter macro
+++    // assembler implementation
+++    {
+++      Label unlock;
+++      // BasicObjectLock will be first in list, since this is a
+++      // synchronized method. However, need to check that the object
+++      // has not been unlocked by an explicit monitorexit bytecode.
+++
+++      // monitor expected in c_rarg1 for slow unlock path
+++      __ la(c_rarg1, Address(fp,   // address of first monitor
+++                             (intptr_t)(frame::interpreter_frame_initial_sp_offset *
+++                                        wordSize - sizeof(BasicObjectLock))));
+++
+++      __ ld(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
+++      __ bnez(t, unlock);
+++
+++      // Entry already unlocked, need to throw exception
+++      __ MacroAssembler::call_VM(noreg,
+++                                 CAST_FROM_FN_PTR(address,
+++                                 InterpreterRuntime::throw_illegal_monitor_state_exception));
+++      __ should_not_reach_here();
+++
+++      __ bind(unlock);
+++      __ unlock_object(c_rarg1);
+++    }
+++    __ bind(L);
+++  }
+++
+++  // jvmti support
+++  // Note: This must happen _after_ handling/throwing any exceptions since
+++  //       the exception handler code notifies the runtime of method exits
+++  //       too. If this happens before, method entry/exit notifications are
+++  //       not properly paired (was bug - gri 11/22/99).
+++  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
+++
+++  __ pop(ltos);
+++  __ pop(dtos);
+++
+++  __ jalr(result_handler);
+++
+++  // remove activation
+++  __ ld(esp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
+++  // remove frame anchor
+++  __ leave();
+++
+++  // restore sender sp
+++  __ mv(sp, esp);
+++
+++  __ ret();
+++
+++  if (inc_counter) {
+++    // Handle overflow of counter and compile method
+++    __ bind(invocation_counter_overflow);
+++    generate_counter_overflow(continue_after_compile);
+++  }
+++
+++  return entry_point;
+++}
+++
+++//
+++// Generic interpreted method entry to (asm) interpreter
+++//
+++address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
+++
+++  // determine code generation flags
+++  const bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+++
+++  // t0: sender sp
+++  address entry_point = __ pc();
+++
+++  const Address constMethod(xmethod, Method::const_offset());
+++  const Address access_flags(xmethod, Method::access_flags_offset());
+++  const Address size_of_parameters(x13,
+++                                   ConstMethod::size_of_parameters_offset());
+++  const Address size_of_locals(x13, ConstMethod::size_of_locals_offset());
+++
+++  // get parameter size (always needed)
+++  // need to load the const method first
+++  __ ld(x13, constMethod);
+++  __ load_unsigned_short(x12, size_of_parameters);
+++
+++  // x12: size of parameters
+++
+++  __ load_unsigned_short(x13, size_of_locals); // get size of locals in words
+++  __ sub(x13, x13, x12); // x13 = no. of additional locals
+++
+++  // see if we've got enough room on the stack for locals plus overhead.
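+++  // (x13 is exactly the x13 argument documented at
+++  // generate_stack_overflow_check(): the number of additional locals.)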
+++ generate_stack_overflow_check(); +++ +++ // compute beginning of parameters (xlocals) +++ __ shadd(xlocals, x12, esp, t1, 3); +++ __ add(xlocals, xlocals, -wordSize); +++ +++ // Make room for additional locals +++ __ slli(t1, x13, 3); +++ __ sub(t0, esp, t1); +++ +++ // Padding between locals and fixed part of activation frame to ensure +++ // SP is always 16-byte aligned. +++ __ andi(sp, t0, -16); +++ +++ // x13 - # of additional locals +++ // allocate space for locals +++ // explicitly initialize locals +++ { +++ Label exit, loop; +++ __ blez(x13, exit); // do nothing if x13 <= 0 +++ __ bind(loop); +++ __ sd(zr, Address(t0)); +++ __ add(t0, t0, wordSize); +++ __ add(x13, x13, -1); // until everything initialized +++ __ bnez(x13, loop); +++ __ bind(exit); +++ } +++ +++ // And the base dispatch table +++ __ get_dispatch(); +++ +++ // initialize fixed part of activation frame +++ generate_fixed_frame(false); +++ +++ // make sure method is not native & not abstract +++#ifdef ASSERT +++ __ lwu(x10, access_flags); +++ __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute native method as non-native"); +++ __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter"); +++#endif +++ +++ // Since at this point in the method invocation the exception +++ // handler would try to exit the monitor of synchronized methods +++ // which hasn't been entered yet, we set the thread local variable +++ // _do_not_unlock_if_synchronized to true. The remove_activation +++ // will check this flag. +++ +++ const Address do_not_unlock_if_synchronized(xthread, +++ in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); +++ __ mv(t1, true); +++ __ sb(t1, do_not_unlock_if_synchronized); +++ +++ Label no_mdp; +++ const Register mdp = x13; +++ __ ld(mdp, Address(xmethod, Method::method_data_offset())); +++ __ beqz(mdp, no_mdp); +++ __ add(mdp, mdp, in_bytes(MethodData::data_offset())); +++ __ profile_parameters_type(mdp, x11, x12, x14); // use x11, x12, x14 as tmp registers +++ __ bind(no_mdp); +++ +++ // increment invocation count & check for overflow +++ Label invocation_counter_overflow; +++ if (inc_counter) { +++ generate_counter_incr(&invocation_counter_overflow); +++ } +++ +++ Label continue_after_compile; +++ __ bind(continue_after_compile); +++ +++ bang_stack_shadow_pages(false); +++ +++ // reset the _do_not_unlock_if_synchronized flag +++ __ sb(zr, do_not_unlock_if_synchronized); +++ +++ // check for synchronized methods +++ // Must happen AFTER invocation_counter check and stack overflow check, +++ // so method is not locked if overflows. 
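+++  // (The overflow path calls into the VM via generate_counter_overflow and
+++  // rejoins at continue_after_compile above, so no monitor is held across
+++  // that call.)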
+++  if (synchronized) {
+++    // Allocate monitor and lock method
+++    lock_method();
+++  } else {
+++    // no synchronization necessary
+++#ifdef ASSERT
+++    __ lwu(x10, access_flags);
+++    __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
+++#endif
+++  }
+++
+++  // start execution
+++#ifdef ASSERT
+++  __ verify_frame_setup();
+++#endif
+++
+++  // jvmti support
+++  __ notify_method_entry();
+++
+++  __ dispatch_next(vtos);
+++
+++  // invocation counter overflow
+++  if (inc_counter) {
+++    // Handle overflow of counter and compile method
+++    __ bind(invocation_counter_overflow);
+++    generate_counter_overflow(continue_after_compile);
+++  }
+++
+++  return entry_point;
+++}
+++
+++//-----------------------------------------------------------------------------
+++// Exceptions
+++
+++void TemplateInterpreterGenerator::generate_throw_exception() {
+++  // Entry point in previous activation (i.e., if the caller was
+++  // interpreted)
+++  Interpreter::_rethrow_exception_entry = __ pc();
+++  // Restore sp to interpreter_frame_last_sp even though we are going
+++  // to empty the expression stack for the exception processing.
+++  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
+++  // x10: exception
+++  // x13: return address/pc that threw exception
+++  __ restore_bcp();    // xbcp points to call/send
+++  __ restore_locals();
+++  __ restore_constant_pool_cache();
+++  __ reinit_heapbase();  // restore xheapbase as heapbase.
+++  __ get_dispatch();
+++
+++  // Entry point for exceptions thrown within interpreter code
+++  Interpreter::_throw_exception_entry = __ pc();
+++  // If we came here via a NullPointerException on the receiver of a
+++  // method, xmethod may be corrupt.
+++  __ get_method(xmethod);
+++  // expression stack is undefined here
+++  // x10: exception
+++  // xbcp: exception bcp
+++  __ verify_oop(x10);
+++  __ mv(c_rarg1, x10);
+++
+++  // expression stack must be empty before entering the VM in case of
+++  // an exception
+++  __ empty_expression_stack();
+++  // find exception handler address and preserve exception oop
+++  __ call_VM(x13,
+++             CAST_FROM_FN_PTR(address,
+++                              InterpreterRuntime::exception_handler_for_exception),
+++             c_rarg1);
+++
+++  // Calculate stack limit
+++  __ ld(t0, Address(xmethod, Method::const_offset()));
+++  __ lhu(t0, Address(t0, ConstMethod::max_stack_offset()));
+++  __ add(t0, t0, frame::interpreter_frame_monitor_size() + 4);
+++  __ ld(t1, Address(fp, frame::interpreter_frame_initial_sp_offset * wordSize));
+++  __ slli(t0, t0, 3);
+++  __ sub(t0, t1, t0);
+++  __ andi(sp, t0, -16);
+++
+++  // x10: exception handler entry point
+++  // x13: preserved exception oop
+++  // xbcp: bcp for exception handler
+++  __ push_ptr(x13); // push exception which is now the only value on the stack
+++  __ jr(x10); // jump to exception handler (may be _remove_activation_entry!)
+++
+++  // If the exception is not handled in the current frame the frame is
+++  // removed and the exception is rethrown (i.e. exception
+++  // continuation is _rethrow_exception).
+++  //
+++  // Note: At this point the bci is still the bci for the instruction
+++  // which caused the exception and the expression stack is
+++  // empty. Thus, for any VM calls at this point, GC will find a legal
+++  // oop map (with empty expression stack).
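+++  // (If this frame has no handler, exception_handler_for_exception returns
+++  // _remove_activation_entry below, which pops the activation and continues
+++  // at _rethrow_exception_entry in the caller.)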
+++ +++ // +++ // JVMTI PopFrame support +++ // +++ +++ Interpreter::_remove_activation_preserving_args_entry = __ pc(); +++ __ empty_expression_stack(); +++ // Set the popframe_processing bit in pending_popframe_condition +++ // indicating that we are currently handling popframe, so that +++ // call_VMs that may happen later do not trigger new popframe +++ // handling cycles. +++ __ lwu(x13, Address(xthread, JavaThread::popframe_condition_offset())); +++ __ ori(x13, x13, JavaThread::popframe_processing_bit); +++ __ sw(x13, Address(xthread, JavaThread::popframe_condition_offset())); +++ +++ { +++ // Check to see whether we are returning to a deoptimized frame. +++ // (The PopFrame call ensures that the caller of the popped frame is +++ // either interpreted or compiled and deoptimizes it if compiled.) +++ // In this case, we can't call dispatch_next() after the frame is +++ // popped, but instead must save the incoming arguments and restore +++ // them after deoptimization has occurred. +++ // +++ // Note that we don't compare the return PC against the +++ // deoptimization blob's unpack entry because of the presence of +++ // adapter frames in C2. +++ Label caller_not_deoptimized; +++ __ ld(c_rarg1, Address(fp, frame::return_addr_offset * wordSize)); +++ __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), c_rarg1); +++ __ bnez(x10, caller_not_deoptimized); +++ +++ // Compute size of arguments for saving when returning to +++ // deoptimized caller +++ __ get_method(x10); +++ __ ld(x10, Address(x10, Method::const_offset())); +++ __ load_unsigned_short(x10, Address(x10, in_bytes(ConstMethod:: +++ size_of_parameters_offset()))); +++ __ slli(x10, x10, Interpreter::logStackElementSize); +++ __ restore_locals(); +++ __ sub(xlocals, xlocals, x10); +++ __ add(xlocals, xlocals, wordSize); +++ // Save these arguments +++ __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, +++ Deoptimization:: +++ popframe_preserve_args), +++ xthread, x10, xlocals); +++ +++ __ remove_activation(vtos, +++ /* throw_monitor_exception */ false, +++ /* install_monitor_exception */ false, +++ /* notify_jvmdi */ false); +++ +++ // Inform deoptimization that it is responsible for restoring +++ // these arguments +++ __ mv(t0, JavaThread::popframe_force_deopt_reexecution_bit); +++ __ sw(t0, Address(xthread, JavaThread::popframe_condition_offset())); +++ +++ // Continue in deoptimization handler +++ __ ret(); +++ +++ __ bind(caller_not_deoptimized); +++ } +++ +++ __ remove_activation(vtos, +++ /* throw_monitor_exception */ false, +++ /* install_monitor_exception */ false, +++ /* notify_jvmdi */ false); +++ +++ // Restore the last_sp and null it out +++ __ ld(esp, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize)); +++ __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize)); +++ +++ __ restore_bcp(); +++ __ restore_locals(); +++ __ restore_constant_pool_cache(); +++ __ get_method(xmethod); +++ __ get_dispatch(); +++ +++ // The method data pointer was incremented already during +++ // call profiling. We have to restore the mdp for the current bcp. 
+++  if (ProfileInterpreter) {
+++    __ set_method_data_pointer_for_bcp();
+++  }
+++
+++  // Clear the popframe condition flag
+++  __ sw(zr, Address(xthread, JavaThread::popframe_condition_offset()));
+++  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");
+++
+++#if INCLUDE_JVMTI
+++  {
+++    Label L_done;
+++
+++    __ lbu(t0, Address(xbcp, 0));
+++    __ mv(t1, Bytecodes::_invokestatic);
+++    __ bne(t1, t0, L_done);
+++
+++    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+++    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+++
+++    __ ld(c_rarg0, Address(xlocals, 0));
+++    __ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, xmethod, xbcp);
+++
+++    __ beqz(x10, L_done);
+++
+++    __ sd(x10, Address(esp, 0));
+++    __ bind(L_done);
+++  }
+++#endif // INCLUDE_JVMTI
+++
+++  // Restore machine SP
+++  __ ld(t0, Address(xmethod, Method::const_offset()));
+++  __ lhu(t0, Address(t0, ConstMethod::max_stack_offset()));
+++  __ add(t0, t0, frame::interpreter_frame_monitor_size() + 4);
+++  __ ld(t1, Address(fp, frame::interpreter_frame_initial_sp_offset * wordSize));
+++  __ slliw(t0, t0, 3);
+++  __ sub(t0, t1, t0);
+++  __ andi(sp, t0, -16);
+++
+++  __ dispatch_next(vtos);
+++  // end of PopFrame support
+++
+++  Interpreter::_remove_activation_entry = __ pc();
+++
+++  // preserve exception over this code sequence
+++  __ pop_ptr(x10);
+++  __ sd(x10, Address(xthread, JavaThread::vm_result_offset()));
+++  // remove the activation (without doing throws on illegalMonitorExceptions)
+++  __ remove_activation(vtos, false, true, false);
+++  // restore exception
+++  __ get_vm_result(x10, xthread);
+++
+++  // In between activations - previous activation type unknown yet
+++  // compute continuation point - the continuation point expects the
+++  // following registers set up:
+++  //
+++  // x10: exception
+++  // ra: return address/pc that threw exception
+++  // sp: expression stack of caller
+++  // fp: fp of caller
+++  // FIXME: There's no point saving ra here because VM calls don't trash it
+++  __ sub(sp, sp, 2 * wordSize);
+++  __ sd(x10, Address(sp, 0));                  // save exception
+++  __ sd(ra, Address(sp, wordSize));            // save return address
+++  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
+++                                         SharedRuntime::exception_handler_for_return_address),
+++                        xthread, ra);
+++  __ mv(x11, x10);                             // save exception handler
+++  __ ld(x10, Address(sp, 0));                  // restore exception
+++  __ ld(ra, Address(sp, wordSize));            // restore return address
+++  __ add(sp, sp, 2 * wordSize);
+++  // We might be returning to a deopt handler that expects x13 to
+++  // contain the exception pc
+++  __ mv(x13, ra);
+++  // Note that an "issuing PC" is actually the next PC after the call
+++  __ jr(x11);                                  // jump to exception
+++                                               // handler of caller
+++}
+++
+++//
+++// JVMTI ForceEarlyReturn support
+++//
+++address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
+++  address entry = __ pc();
+++
+++  __ restore_bcp();
+++  __ restore_locals();
+++  __ empty_expression_stack();
+++  __ load_earlyret_value(state);
+++
+++  __ ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset()));
+++  Address cond_addr(t0, JvmtiThreadState::earlyret_state_offset());
+++
+++  // Clear the earlyret state
+++  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
+++  __ sd(zr, cond_addr);
+++
+++  __ remove_activation(state,
+++                       false, /* throw_monitor_exception */
+++                       false, /*
install_monitor_exception */
+++                       true); /* notify_jvmdi */
+++  __ ret();
+++
+++  return entry;
+++}
+++// end of ForceEarlyReturn support
+++
+++//-----------------------------------------------------------------------------
+++// Helper for vtos entry point generation
+++
+++void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
+++                                                         address& bep,
+++                                                         address& cep,
+++                                                         address& sep,
+++                                                         address& aep,
+++                                                         address& iep,
+++                                                         address& lep,
+++                                                         address& fep,
+++                                                         address& dep,
+++                                                         address& vep) {
+++  assert(t != NULL && t->is_valid() && t->tos_in() == vtos, "illegal template");
+++  Label L;
+++  aep = __ pc();  __ push_ptr();  __ j(L);
+++  fep = __ pc();  __ push_f();    __ j(L);
+++  dep = __ pc();  __ push_d();    __ j(L);
+++  lep = __ pc();  __ push_l();    __ j(L);
+++  bep = cep = sep =
+++  iep = __ pc();  __ push_i();
+++  vep = __ pc();
+++  __ bind(L);
+++  generate_and_dispatch(t);
+++}
+++
+++//-----------------------------------------------------------------------------
+++
+++// Non-product code
+++#ifndef PRODUCT
+++address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
+++  address entry = __ pc();
+++
+++  __ push_reg(ra);
+++  __ push(state);
+++  __ push_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
+++  __ mv(c_rarg2, x10);  // Pass itos
+++  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3);
+++  __ pop_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
+++  __ pop(state);
+++  __ pop_reg(ra);
+++  __ ret();                             // return from result handler
+++
+++  return entry;
+++}
+++
+++void TemplateInterpreterGenerator::count_bytecode() {
+++  __ push_reg(t0);
+++  __ push_reg(x10);
+++  __ mv(x10, (address) &BytecodeCounter::_counter_value);
+++  __ mv(t0, 1);
+++  __ amoadd_d(zr, x10, t0, Assembler::aqrl);
+++  __ pop_reg(x10);
+++  __ pop_reg(t0);
+++}
+++
+++void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; }
+++
+++void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; }
+++
+++void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
+++  // Call a little run-time stub to avoid blow-up for each bytecode.
+++  // The run-time stub saves the right registers, depending on
+++  // the tosca in-state for the given template.
+++
+++  assert(Interpreter::trace_code(t->tos_in()) != NULL, "entry must have been generated");
+++  __ jal(Interpreter::trace_code(t->tos_in()));
+++  __ reinit_heapbase();
+++}
+++
+++void TemplateInterpreterGenerator::stop_interpreter_at() {
+++  Label L;
+++  __ push_reg(t0);
+++  __ mv(t0, (address) &BytecodeCounter::_counter_value);
+++  __ ld(t0, Address(t0));
+++  __ mv(t1, StopInterpreterAt);
+++  __ bne(t0, t1, L);
+++  __ ebreak();
+++  __ bind(L);
+++  __ pop_reg(t0);
+++}
+++
+++#endif // !PRODUCT
++--- /dev/null
+++++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
++@@ -0,0 +1,3949 @@
+++/*
+++ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.inline.hpp" +++#include "gc/shared/barrierSetAssembler.hpp" +++#include "gc/shared/collectedHeap.hpp" +++#include "gc/shared/tlab_globals.hpp" +++#include "interpreter/interp_masm.hpp" +++#include "interpreter/interpreter.hpp" +++#include "interpreter/interpreterRuntime.hpp" +++#include "interpreter/templateTable.hpp" +++#include "memory/universe.hpp" +++#include "oops/method.hpp" +++#include "oops/methodData.hpp" +++#include "oops/objArrayKlass.hpp" +++#include "oops/oop.inline.hpp" +++#include "prims/jvmtiExport.hpp" +++#include "prims/methodHandles.hpp" +++#include "runtime/frame.inline.hpp" +++#include "runtime/sharedRuntime.hpp" +++#include "runtime/stubRoutines.hpp" +++#include "runtime/synchronizer.hpp" +++#include "utilities/powerOfTwo.hpp" +++ +++#define __ _masm-> +++ +++// Address computation: local variables +++ +++static inline Address iaddress(int n) { +++ return Address(xlocals, Interpreter::local_offset_in_bytes(n)); +++} +++ +++static inline Address laddress(int n) { +++ return iaddress(n + 1); +++} +++ +++static inline Address faddress(int n) { +++ return iaddress(n); +++} +++ +++static inline Address daddress(int n) { +++ return laddress(n); +++} +++ +++static inline Address aaddress(int n) { +++ return iaddress(n); +++} +++ +++static inline Address iaddress(Register r, Register temp, InterpreterMacroAssembler* _masm) { +++ _masm->shadd(temp, r, xlocals, temp, 3); +++ return Address(temp, 0); +++} +++ +++static inline Address laddress(Register r, Register temp, InterpreterMacroAssembler* _masm) { +++ _masm->shadd(temp, r, xlocals, temp, 3); +++ return Address(temp, Interpreter::local_offset_in_bytes(1));; +++} +++ +++static inline Address faddress(Register r, Register temp, InterpreterMacroAssembler* _masm) { +++ return iaddress(r, temp, _masm); +++} +++ +++static inline Address daddress(Register r, Register temp, InterpreterMacroAssembler* _masm) { +++ return laddress(r, temp, _masm); +++} +++ +++static inline Address aaddress(Register r, Register temp, InterpreterMacroAssembler* _masm) { +++ return iaddress(r, temp, _masm); +++} +++ +++static inline Address at_rsp() { +++ return Address(esp, 0); +++} +++ +++// At top of Java expression stack which may be different than esp(). It +++// isn't for category 1 objects. 
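+++// (Each helper below selects expression-stack slot i via
+++// Interpreter::expr_offset_in_bytes(i): at_tos() is slot 0, at_tos_p1()
+++// slot 1, and so on.)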
+++static inline Address at_tos () {
+++  return Address(esp, Interpreter::expr_offset_in_bytes(0));
+++}
+++
+++static inline Address at_tos_p1() {
+++  return Address(esp, Interpreter::expr_offset_in_bytes(1));
+++}
+++
+++static inline Address at_tos_p2() {
+++  return Address(esp, Interpreter::expr_offset_in_bytes(2));
+++}
+++
+++static inline Address at_tos_p3() {
+++  return Address(esp, Interpreter::expr_offset_in_bytes(3));
+++}
+++
+++static inline Address at_tos_p4() {
+++  return Address(esp, Interpreter::expr_offset_in_bytes(4));
+++}
+++
+++static inline Address at_tos_p5() {
+++  return Address(esp, Interpreter::expr_offset_in_bytes(5));
+++}
+++
+++// Miscellaneous helper routines
+++// Store an oop (or NULL) at the Address described by dst.
+++// If val == noreg this means store a NULL
+++static void do_oop_store(InterpreterMacroAssembler* _masm,
+++                         Address dst,
+++                         Register val,
+++                         DecoratorSet decorators) {
+++  assert(val == noreg || val == x10, "parameter is just for looks");
+++  __ store_heap_oop(dst, val, x29, x11, decorators);
+++}
+++
+++static void do_oop_load(InterpreterMacroAssembler* _masm,
+++                        Address src,
+++                        Register dst,
+++                        DecoratorSet decorators) {
+++  __ load_heap_oop(dst, src, x7, x11, decorators);
+++}
+++
+++Address TemplateTable::at_bcp(int offset) {
+++  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
+++  return Address(xbcp, offset);
+++}
+++
+++void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
+++                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
+++                                   int byte_no)
+++{
+++  if (!RewriteBytecodes)  { return; }
+++  Label L_patch_done;
+++
+++  switch (bc) {
+++    case Bytecodes::_fast_aputfield:  // fall through
+++    case Bytecodes::_fast_bputfield:  // fall through
+++    case Bytecodes::_fast_zputfield:  // fall through
+++    case Bytecodes::_fast_cputfield:  // fall through
+++    case Bytecodes::_fast_dputfield:  // fall through
+++    case Bytecodes::_fast_fputfield:  // fall through
+++    case Bytecodes::_fast_iputfield:  // fall through
+++    case Bytecodes::_fast_lputfield:  // fall through
+++    case Bytecodes::_fast_sputfield: {
+++      // We skip bytecode quickening for putfield instructions when
+++      // the put_code written to the constant pool cache is zero.
+++      // This is required so that every execution of this instruction
+++      // calls out to InterpreterRuntime::resolve_get_put to do
+++      // additional, required work.
+++      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+++      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
+++      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
+++      __ mv(bc_reg, bc);
+++      __ beqz(temp_reg, L_patch_done);
+++      break;
+++    }
+++    default:
+++      assert(byte_no == -1, "sanity");
+++      // the pair bytecodes have already done the load.
+++      if (load_bc_into_bc_reg) {
+++        __ mv(bc_reg, bc);
+++      }
+++  }
+++
+++  if (JvmtiExport::can_post_breakpoint()) {
+++    Label L_fast_patch;
+++    // if a breakpoint is present we can't rewrite the stream directly
+++    __ load_unsigned_byte(temp_reg, at_bcp(0));
+++    __ addi(temp_reg, temp_reg, -Bytecodes::_breakpoint); // temp_reg is temporary register.
+++    __ bnez(temp_reg, L_fast_patch);
+++    // Let breakpoint table handling rewrite to quicker bytecode
+++    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), xmethod, xbcp, bc_reg);
+++    __ j(L_patch_done);
+++    __ bind(L_fast_patch);
+++  }
+++
+++#ifdef ASSERT
+++  Label L_okay;
+++  __ load_unsigned_byte(temp_reg, at_bcp(0));
+++  __ beq(temp_reg, bc_reg, L_okay);
+++  __ addi(temp_reg, temp_reg, -(int) Bytecodes::java_code(bc));
+++  __ beqz(temp_reg, L_okay);
+++  __ stop("patching the wrong bytecode");
+++  __ bind(L_okay);
+++#endif
+++
+++  // patch bytecode
+++  __ sb(bc_reg, at_bcp(0));
+++  __ bind(L_patch_done);
+++}
+++
+++// Individual instructions
+++
+++void TemplateTable::nop() {
+++  transition(vtos, vtos);
+++  // nothing to do
+++}
+++
+++void TemplateTable::shouldnotreachhere() {
+++  transition(vtos, vtos);
+++  __ stop("should not reach here bytecode");
+++}
+++
+++void TemplateTable::aconst_null()
+++{
+++  transition(vtos, atos);
+++  __ mv(x10, zr);
+++}
+++
+++void TemplateTable::iconst(int value)
+++{
+++  transition(vtos, itos);
+++  __ mv(x10, value);
+++}
+++
+++void TemplateTable::lconst(int value)
+++{
+++  transition(vtos, ltos);
+++  __ mv(x10, value);
+++}
+++
+++void TemplateTable::fconst(int value)
+++{
+++  transition(vtos, ftos);
+++  static float fBuf[2] = {1.0, 2.0};
+++  __ mv(t0, (intptr_t)fBuf);
+++  switch (value) {
+++    case 0:
+++      __ fmv_w_x(f10, zr);
+++      break;
+++    case 1:
+++      __ flw(f10, Address(t0, 0));
+++      break;
+++    case 2:
+++      __ flw(f10, Address(t0, sizeof(float)));
+++      break;
+++    default:
+++      ShouldNotReachHere();
+++  }
+++}
+++
+++void TemplateTable::dconst(int value)
+++{
+++  transition(vtos, dtos);
+++  static double dBuf[2] = {1.0, 2.0};
+++  __ mv(t0, (intptr_t)dBuf);
+++  switch (value) {
+++    case 0:
+++      __ fmv_d_x(f10, zr);
+++      break;
+++    case 1:
+++      __ fld(f10, Address(t0, 0));
+++      break;
+++    case 2:
+++      __ fld(f10, Address(t0, sizeof(double)));
+++      break;
+++    default:
+++      ShouldNotReachHere();
+++  }
+++}
+++
+++void TemplateTable::bipush()
+++{
+++  transition(vtos, itos);
+++  __ load_signed_byte(x10, at_bcp(1));
+++}
+++
+++void TemplateTable::sipush()
+++{
+++  transition(vtos, itos);
+++  __ load_unsigned_short(x10, at_bcp(1));
+++  __ revb_w_w(x10, x10);
+++  __ sraiw(x10, x10, 16);
+++}
+++
+++void TemplateTable::ldc(bool wide)
+++{
+++  transition(vtos, vtos);
+++  Label call_ldc, notFloat, notClass, notInt, Done;
+++
+++  if (wide) {
+++    __ get_unsigned_2_byte_index_at_bcp(x11, 1);
+++  } else {
+++    __ load_unsigned_byte(x11, at_bcp(1));
+++  }
+++  __ get_cpool_and_tags(x12, x10);
+++
+++  const int base_offset = ConstantPool::header_size() * wordSize;
+++  const int tags_offset = Array<u1>::base_offset_in_bytes();
+++
+++  // get type
+++  __ addi(x13, x11, tags_offset);
+++  __ add(x13, x10, x13);
+++  __ membar(MacroAssembler::AnyAny);
+++  __ lbu(x13, Address(x13, 0));
+++  __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
+++
+++  // unresolved class - get the resolved class
+++  __ mv(t1, (u1)JVM_CONSTANT_UnresolvedClass);
+++  __ beq(x13, t1, call_ldc);
+++
+++  // unresolved class in error state - call into runtime to throw the error
+++  // from the first resolution attempt
+++  __ mv(t1, (u1)JVM_CONSTANT_UnresolvedClassInError);
+++  __ beq(x13, t1, call_ldc);
+++
+++  // resolved class - need to call vm to get java mirror of the class
+++  __ mv(t1, (u1)JVM_CONSTANT_Class);
+++  __ bne(x13, t1, notClass);
+++
+++  __ bind(call_ldc);
+++  __ mv(c_rarg1, wide);
+++  call_VM(x10, CAST_FROM_FN_PTR(address,
InterpreterRuntime::ldc), c_rarg1);
+++  __ push_ptr(x10);
+++  __ verify_oop(x10);
+++  __ j(Done);
+++
+++  __ bind(notClass);
+++  __ mv(t1, (u1)JVM_CONSTANT_Float);
+++  __ bne(x13, t1, notFloat);
+++
+++  // ftos
+++  __ shadd(x11, x11, x12, x11, 3);
+++  __ flw(f10, Address(x11, base_offset));
+++  __ push_f(f10);
+++  __ j(Done);
+++
+++  __ bind(notFloat);
+++
+++  __ mv(t1, (u1)JVM_CONSTANT_Integer);
+++  __ bne(x13, t1, notInt);
+++
+++  // itos
+++  __ shadd(x11, x11, x12, x11, 3);
+++  __ lw(x10, Address(x11, base_offset));
+++  __ push_i(x10);
+++  __ j(Done);
+++
+++  __ bind(notInt);
+++  condy_helper(Done);
+++
+++  __ bind(Done);
+++}
+++
+++// Fast path for caching oop constants.
+++void TemplateTable::fast_aldc(bool wide)
+++{
+++  transition(vtos, atos);
+++
+++  const Register result = x10;
+++  const Register tmp = x11;
+++  const Register rarg = x12;
+++
+++  const int index_size = wide ? sizeof(u2) : sizeof(u1);
+++
+++  Label resolved;
+++
+++  // We are resolved if the resolved reference cache entry contains a
+++  // non-null object (String, MethodType, etc.)
+++  assert_different_registers(result, tmp);
+++  __ get_cache_index_at_bcp(tmp, 1, index_size);
+++  __ load_resolved_reference_at_index(result, tmp);
+++  __ bnez(result, resolved);
+++
+++  const address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
+++
+++  // first time invocation - must resolve first
+++  __ mv(rarg, (int)bytecode());
+++  __ call_VM(result, entry, rarg);
+++
+++  __ bind(resolved);
+++
+++  { // Check for the null sentinel.
+++    // If we just called the VM, it already did the mapping for us,
+++    // but it's harmless to retry.
+++    Label notNull;
+++
+++    // Stash null_sentinel address to get its value later
+++    int32_t offset = 0;
+++    __ mv(rarg, Universe::the_null_sentinel_addr(), offset);
+++    __ ld(tmp, Address(rarg, offset));
+++    __ resolve_oop_handle(tmp);
+++    __ bne(result, tmp, notNull);
+++    __ mv(result, zr); // NULL object reference
+++    __ bind(notNull);
+++  }
+++
+++  if (VerifyOops) {
+++    // Safe to call with 0 result
+++    __ verify_oop(result);
+++  }
+++}
+++
+++void TemplateTable::ldc2_w()
+++{
+++  transition(vtos, vtos);
+++  Label notDouble, notLong, Done;
+++  __ get_unsigned_2_byte_index_at_bcp(x10, 1);
+++
+++  __ get_cpool_and_tags(x11, x12);
+++  const int base_offset = ConstantPool::header_size() * wordSize;
+++  const int tags_offset = Array<u1>::base_offset_in_bytes();
+++
+++  // get type
+++  __ add(x12, x12, x10);
+++  __ load_unsigned_byte(x12, Address(x12, tags_offset));
+++  __ mv(t1, JVM_CONSTANT_Double);
+++  __ bne(x12, t1, notDouble);
+++
+++  // dtos
+++  __ shadd(x12, x10, x11, x12, 3);
+++  __ fld(f10, Address(x12, base_offset));
+++  __ push_d(f10);
+++  __ j(Done);
+++
+++  __ bind(notDouble);
+++  __ mv(t1, (int)JVM_CONSTANT_Long);
+++  __ bne(x12, t1, notLong);
+++
+++  // ltos
+++  __ shadd(x10, x10, x11, x10, 3);
+++  __ ld(x10, Address(x10, base_offset));
+++  __ push_l(x10);
+++  __ j(Done);
+++
+++  __ bind(notLong);
+++  condy_helper(Done);
+++  __ bind(Done);
+++}
+++
+++void TemplateTable::condy_helper(Label& Done)
+++{
+++  const Register obj = x10;
+++  const Register rarg = x11;
+++  const Register flags = x12;
+++  const Register off = x13;
+++
+++  const address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
+++
+++  __ mv(rarg, (int) bytecode());
+++  __ call_VM(obj, entry, rarg);
+++
+++  __ get_vm_result_2(flags, xthread);
+++
+++  // VMr = obj = base address to find primitive value to push
+++  // VMr2 = flags = (tos, off) using format of CPCE::_flags
+++  __ mv(off,
flags);
+++  __ mv(t0, ConstantPoolCacheEntry::field_index_mask);
+++  __ andrw(off, off, t0);
+++
+++  __ add(off, obj, off);
+++  const Address field(off, 0); // field address = obj (base) + offset
+++
+++  __ slli(flags, flags, XLEN - (ConstantPoolCacheEntry::tos_state_shift + ConstantPoolCacheEntry::tos_state_bits));
+++  __ srli(flags, flags, XLEN - ConstantPoolCacheEntry::tos_state_bits); // leaves the tos_state bits of _flags in flags[3:0]
+++
+++  switch (bytecode()) {
+++    case Bytecodes::_ldc:   // fall through
+++    case Bytecodes::_ldc_w: {
+++      // tos in (itos, ftos, stos, btos, ctos, ztos)
+++      Label notInt, notFloat, notShort, notByte, notChar, notBool;
+++      __ mv(t1, itos);
+++      __ bne(flags, t1, notInt);
+++      // itos
+++      __ lw(x10, field);
+++      __ push(itos);
+++      __ j(Done);
+++
+++      __ bind(notInt);
+++      __ mv(t1, ftos);
+++      __ bne(flags, t1, notFloat);
+++      // ftos
+++      __ load_float(field);
+++      __ push(ftos);
+++      __ j(Done);
+++
+++      __ bind(notFloat);
+++      __ mv(t1, stos);
+++      __ bne(flags, t1, notShort);
+++      // stos
+++      __ load_signed_short(x10, field);
+++      __ push(stos);
+++      __ j(Done);
+++
+++      __ bind(notShort);
+++      __ mv(t1, btos);
+++      __ bne(flags, t1, notByte);
+++      // btos
+++      __ load_signed_byte(x10, field);
+++      __ push(btos);
+++      __ j(Done);
+++
+++      __ bind(notByte);
+++      __ mv(t1, ctos);
+++      __ bne(flags, t1, notChar);
+++      // ctos
+++      __ load_unsigned_short(x10, field);
+++      __ push(ctos);
+++      __ j(Done);
+++
+++      __ bind(notChar);
+++      __ mv(t1, ztos);
+++      __ bne(flags, t1, notBool);
+++      // ztos
+++      __ load_signed_byte(x10, field);
+++      __ push(ztos);
+++      __ j(Done);
+++
+++      __ bind(notBool);
+++      break;
+++    }
+++
+++    case Bytecodes::_ldc2_w: {
+++      Label notLong, notDouble;
+++      __ mv(t1, ltos);
+++      __ bne(flags, t1, notLong);
+++      // ltos
+++      __ ld(x10, field);
+++      __ push(ltos);
+++      __ j(Done);
+++
+++      __ bind(notLong);
+++      __ mv(t1, dtos);
+++      __ bne(flags, t1, notDouble);
+++      // dtos
+++      __ load_double(field);
+++      __ push(dtos);
+++      __ j(Done);
+++
+++      __ bind(notDouble);
+++      break;
+++    }
+++
+++    default:
+++      ShouldNotReachHere();
+++  }
+++
+++  __ stop("bad ldc/condy");
+++}
+++
+++void TemplateTable::locals_index(Register reg, int offset)
+++{
+++  __ lbu(reg, at_bcp(offset));
+++  __ neg(reg, reg);
+++}
+++
+++void TemplateTable::iload() {
+++  iload_internal();
+++}
+++
+++void TemplateTable::nofast_iload() {
+++  iload_internal(may_not_rewrite);
+++}
+++
+++void TemplateTable::iload_internal(RewriteControl rc) {
+++  transition(vtos, itos);
+++  if (RewriteFrequentPairs && rc == may_rewrite) {
+++    Label rewrite, done;
+++    const Register bc = x14;
+++
+++    // get next bytecode
+++    __ load_unsigned_byte(x11, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
+++
+++    // if _iload, wait to rewrite to iload2. We only want to rewrite the
+++    // last two iloads in a pair. Comparing against fast_iload means that
+++    // the next bytecode is neither an iload nor a caload, and therefore
+++    // an iload pair.
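+++    // (Dispatch below: next == _iload leaves this one untouched for now;
+++    // next == _fast_iload turns this iload into _fast_iload2; next ==
+++    // _caload turns it into _fast_icaload; otherwise it becomes _fast_iload.)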
+++ __ mv(t1, Bytecodes::_iload); +++ __ beq(x11, t1, done); +++ +++ // if _fast_iload rewrite to _fast_iload2 +++ __ mv(t1, Bytecodes::_fast_iload); +++ __ mv(bc, Bytecodes::_fast_iload2); +++ __ beq(x11, t1, rewrite); +++ +++ // if _caload rewrite to _fast_icaload +++ __ mv(t1, Bytecodes::_caload); +++ __ mv(bc, Bytecodes::_fast_icaload); +++ __ beq(x11, t1, rewrite); +++ +++ // else rewrite to _fast_iload +++ __ mv(bc, Bytecodes::_fast_iload); +++ +++ // rewrite +++ // bc: new bytecode +++ __ bind(rewrite); +++ patch_bytecode(Bytecodes::_iload, bc, x11, false); +++ __ bind(done); +++ +++ } +++ +++ // do iload, get the local value into tos +++ locals_index(x11); +++ __ lw(x10, iaddress(x11, x10, _masm)); +++} +++ +++void TemplateTable::fast_iload2() +++{ +++ transition(vtos, itos); +++ locals_index(x11); +++ __ lw(x10, iaddress(x11, x10, _masm)); +++ __ push(itos); +++ locals_index(x11, 3); +++ __ lw(x10, iaddress(x11, x10, _masm)); +++} +++ +++void TemplateTable::fast_iload() +++{ +++ transition(vtos, itos); +++ locals_index(x11); +++ __ lw(x10, iaddress(x11, x10, _masm)); +++} +++ +++void TemplateTable::lload() +++{ +++ transition(vtos, ltos); +++ __ lbu(x11, at_bcp(1)); +++ __ slli(x11, x11, LogBytesPerWord); +++ __ sub(x11, xlocals, x11); +++ __ ld(x10, Address(x11, Interpreter::local_offset_in_bytes(1))); +++} +++ +++void TemplateTable::fload() +++{ +++ transition(vtos, ftos); +++ locals_index(x11); +++ __ flw(f10, faddress(x11, t0, _masm)); +++} +++ +++void TemplateTable::dload() +++{ +++ transition(vtos, dtos); +++ __ lbu(x11, at_bcp(1)); +++ __ slli(x11, x11, LogBytesPerWord); +++ __ sub(x11, xlocals, x11); +++ __ fld(f10, Address(x11, Interpreter::local_offset_in_bytes(1))); +++} +++ +++void TemplateTable::aload() +++{ +++ transition(vtos, atos); +++ locals_index(x11); +++ __ ld(x10, iaddress(x11, x10, _masm)); +++ +++} +++ +++void TemplateTable::locals_index_wide(Register reg) { +++ __ lhu(reg, at_bcp(2)); +++ __ revb_h_h_u(reg, reg); // reverse bytes in half-word and zero-extend +++ __ neg(reg, reg); +++} +++ +++void TemplateTable::wide_iload() { +++ transition(vtos, itos); +++ locals_index_wide(x11); +++ __ lw(x10, iaddress(x11, t0, _masm)); +++} +++ +++void TemplateTable::wide_lload() +++{ +++ transition(vtos, ltos); +++ __ lhu(x11, at_bcp(2)); +++ __ revb_h_h_u(x11, x11); // reverse bytes in half-word and zero-extend +++ __ slli(x11, x11, LogBytesPerWord); +++ __ sub(x11, xlocals, x11); +++ __ ld(x10, Address(x11, Interpreter::local_offset_in_bytes(1))); +++} +++ +++void TemplateTable::wide_fload() +++{ +++ transition(vtos, ftos); +++ locals_index_wide(x11); +++ __ flw(f10, faddress(x11, t0, _masm)); +++} +++ +++void TemplateTable::wide_dload() +++{ +++ transition(vtos, dtos); +++ __ lhu(x11, at_bcp(2)); +++ __ revb_h_h_u(x11, x11); // reverse bytes in half-word and zero-extend +++ __ slli(x11, x11, LogBytesPerWord); +++ __ sub(x11, xlocals, x11); +++ __ fld(f10, Address(x11, Interpreter::local_offset_in_bytes(1))); +++} +++ +++void TemplateTable::wide_aload() +++{ +++ transition(vtos, atos); +++ locals_index_wide(x11); +++ __ ld(x10, aaddress(x11, t0, _masm)); +++} +++ +++void TemplateTable::index_check(Register array, Register index) +++{ +++ // destroys x11, t0 +++ // check array +++ __ null_check(array, arrayOopDesc::length_offset_in_bytes()); +++ // sign extend index for use by indexed load +++ // check index +++ const Register length = t0; +++ __ lwu(length, Address(array, arrayOopDesc::length_offset_in_bytes())); +++ if (index != x11) { +++ assert(x11 != array, 
"different registers"); +++ __ mv(x11, index); +++ } +++ Label ok; +++ __ sign_extend(index, index, 32); +++ __ bltu(index, length, ok); +++ __ mv(x13, array); +++ __ mv(t0, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry); +++ __ jr(t0); +++ __ bind(ok); +++} +++ +++void TemplateTable::iaload() +++{ +++ transition(itos, itos); +++ __ mv(x11, x10); +++ __ pop_ptr(x10); +++ // x10: array +++ // x11: index +++ index_check(x10, x11); // leaves index in x11 +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2); +++ __ shadd(x10, x11, x10, t0, 2); +++ __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, x10, Address(x10), noreg, noreg); +++ __ sign_extend(x10, x10, 32); +++} +++ +++void TemplateTable::laload() +++{ +++ transition(itos, ltos); +++ __ mv(x11, x10); +++ __ pop_ptr(x10); +++ // x10: array +++ // x11: index +++ index_check(x10, x11); // leaves index in x11 +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3); +++ __ shadd(x10, x11, x10, t0, 3); +++ __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, x10, Address(x10), noreg, noreg); +++} +++ +++void TemplateTable::faload() +++{ +++ transition(itos, ftos); +++ __ mv(x11, x10); +++ __ pop_ptr(x10); +++ // x10: array +++ // x11: index +++ index_check(x10, x11); // leaves index in x11 +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2); +++ __ shadd(x10, x11, x10, t0, 2); +++ __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, x10, Address(x10), noreg, noreg); +++} +++ +++void TemplateTable::daload() +++{ +++ transition(itos, dtos); +++ __ mv(x11, x10); +++ __ pop_ptr(x10); +++ // x10: array +++ // x11: index +++ index_check(x10, x11); // leaves index in x11 +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3); +++ __ shadd(x10, x11, x10, t0, 3); +++ __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, x10, Address(x10), noreg, noreg); +++} +++ +++void TemplateTable::aaload() +++{ +++ transition(itos, atos); +++ __ mv(x11, x10); +++ __ pop_ptr(x10); +++ // x10: array +++ // x11: index +++ index_check(x10, x11); // leaves index in x11 +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop); +++ __ shadd(x10, x11, x10, t0, LogBytesPerHeapOop); +++ do_oop_load(_masm, +++ Address(x10), +++ x10, +++ IS_ARRAY); +++} +++ +++void TemplateTable::baload() +++{ +++ transition(itos, itos); +++ __ mv(x11, x10); +++ __ pop_ptr(x10); +++ // x10: array +++ // x11: index +++ index_check(x10, x11); // leaves index in x11 +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0); +++ __ shadd(x10, x11, x10, t0, 0); +++ __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, x10, Address(x10), noreg, noreg); +++} +++ +++void TemplateTable::caload() +++{ +++ transition(itos, itos); +++ __ mv(x11, x10); +++ __ pop_ptr(x10); +++ // x10: array +++ // x11: index +++ index_check(x10, x11); // leaves index in x11 +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1); +++ __ shadd(x10, x11, x10, t0, 1); +++ __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, x10, Address(x10), noreg, noreg); +++} +++ +++// iload followed by caload frequent pair +++void TemplateTable::fast_icaload() +++{ +++ transition(vtos, itos); +++ // load index out of locals +++ locals_index(x12); +++ __ lw(x11, iaddress(x12, x11, _masm)); +++ __ pop_ptr(x10); +++ +++ // x10: array +++ // x11: index +++ index_check(x10, x11); // leaves index in x11, kills t0 +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1); // addi, max imm is 2^11 +++ __ shadd(x10, x11, x10, t0, 1); +++ __ 
access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, x10, Address(x10), noreg, noreg); +++} +++ +++void TemplateTable::saload() +++{ +++ transition(itos, itos); +++ __ mv(x11, x10); +++ __ pop_ptr(x10); +++ // x10: array +++ // x11: index +++ index_check(x10, x11); // leaves index in x11, kills t0 +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1); +++ __ shadd(x10, x11, x10, t0, 1); +++ __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, x10, Address(x10), noreg, noreg); +++} +++ +++void TemplateTable::iload(int n) +++{ +++ transition(vtos, itos); +++ __ lw(x10, iaddress(n)); +++} +++ +++void TemplateTable::lload(int n) +++{ +++ transition(vtos, ltos); +++ __ ld(x10, laddress(n)); +++} +++ +++void TemplateTable::fload(int n) +++{ +++ transition(vtos, ftos); +++ __ flw(f10, faddress(n)); +++} +++ +++void TemplateTable::dload(int n) +++{ +++ transition(vtos, dtos); +++ __ fld(f10, daddress(n)); +++} +++ +++void TemplateTable::aload(int n) +++{ +++ transition(vtos, atos); +++ __ ld(x10, iaddress(n)); +++} +++ +++void TemplateTable::aload_0() { +++ aload_0_internal(); +++} +++ +++void TemplateTable::nofast_aload_0() { +++ aload_0_internal(may_not_rewrite); +++} +++ +++void TemplateTable::aload_0_internal(RewriteControl rc) { +++ // According to bytecode histograms, the pairs: +++ // +++ // _aload_0, _fast_igetfield +++ // _aload_0, _fast_agetfield +++ // _aload_0, _fast_fgetfield +++ // +++ // occur frequently. If RewriteFrequentPairs is set, the (slow) +++ // _aload_0 bytecode checks if the next bytecode is either +++ // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then +++ // rewrites the current bytecode into a pair bytecode; otherwise it +++ // rewrites the current bytecode into _fast_aload_0 that doesn't do +++ // the pair check anymore. +++ // +++ // Note: If the next bytecode is _getfield, the rewrite must be +++ // delayed, otherwise we may miss an opportunity for a pair. 
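+++ // Editorial illustration (not from the original source): in a typical
+++ // getter, "aload_0; getfield #f" first waits, then getfield itself is
+++ // patched to e.g. _fast_agetfield; on a later pass the check below
+++ // matches and this aload_0 is folded into _fast_aaccess_0.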
+++ //
+++ // Also rewrite frequent pairs
+++ // aload_0, aload_1
+++ // aload_0, iload_1
+++ // These short bytecode sequences are the most profitable
+++ // to rewrite.
+++ if (RewriteFrequentPairs && rc == may_rewrite) {
+++ Label rewrite, done;
+++ const Register bc = x14;
+++
+++ // get next bytecode
+++ __ load_unsigned_byte(x11, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
+++
+++ // if _getfield then wait to rewrite
+++ __ mv(t1, Bytecodes::_getfield);
+++ __ beq(x11, t1, done);
+++
+++ // if _igetfield then rewrite to _fast_iaccess_0
+++ assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
+++ __ mv(t1, Bytecodes::_fast_igetfield);
+++ __ mv(bc, Bytecodes::_fast_iaccess_0);
+++ __ beq(x11, t1, rewrite);
+++
+++ // if _agetfield then rewrite to _fast_aaccess_0
+++ assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
+++ __ mv(t1, Bytecodes::_fast_agetfield);
+++ __ mv(bc, Bytecodes::_fast_aaccess_0);
+++ __ beq(x11, t1, rewrite);
+++
+++ // if _fgetfield then rewrite to _fast_faccess_0
+++ assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
+++ __ mv(t1, Bytecodes::_fast_fgetfield);
+++ __ mv(bc, Bytecodes::_fast_faccess_0);
+++ __ beq(x11, t1, rewrite);
+++
+++ // else rewrite to _fast_aload_0
+++ assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
+++ __ mv(bc, Bytecodes::_fast_aload_0);
+++
+++ // rewrite
+++ // bc: new bytecode
+++ __ bind(rewrite);
+++ patch_bytecode(Bytecodes::_aload_0, bc, x11, false);
+++
+++ __ bind(done);
+++ }
+++
+++ // Do actual aload_0 (must do this after patch_bytecode, which might call the VM, where GC might change the oop).
+++ aload(0); +++} +++ +++void TemplateTable::istore() +++{ +++ transition(itos, vtos); +++ locals_index(x11); +++ __ sw(x10, iaddress(x11, t0, _masm)); +++} +++ +++void TemplateTable::lstore() +++{ +++ transition(ltos, vtos); +++ locals_index(x11); +++ __ sd(x10, laddress(x11, t0, _masm)); +++} +++ +++void TemplateTable::fstore() { +++ transition(ftos, vtos); +++ locals_index(x11); +++ __ fsw(f10, iaddress(x11, t0, _masm)); +++} +++ +++void TemplateTable::dstore() { +++ transition(dtos, vtos); +++ locals_index(x11); +++ __ fsd(f10, daddress(x11, t0, _masm)); +++} +++ +++void TemplateTable::astore() +++{ +++ transition(vtos, vtos); +++ __ pop_ptr(x10); +++ locals_index(x11); +++ __ sd(x10, aaddress(x11, t0, _masm)); +++} +++ +++void TemplateTable::wide_istore() { +++ transition(vtos, vtos); +++ __ pop_i(); +++ locals_index_wide(x11); +++ __ sw(x10, iaddress(x11, t0, _masm)); +++} +++ +++void TemplateTable::wide_lstore() { +++ transition(vtos, vtos); +++ __ pop_l(); +++ locals_index_wide(x11); +++ __ sd(x10, laddress(x11, t0, _masm)); +++} +++ +++void TemplateTable::wide_fstore() { +++ transition(vtos, vtos); +++ __ pop_f(); +++ locals_index_wide(x11); +++ __ fsw(f10, faddress(x11, t0, _masm)); +++} +++ +++void TemplateTable::wide_dstore() { +++ transition(vtos, vtos); +++ __ pop_d(); +++ locals_index_wide(x11); +++ __ fsd(f10, daddress(x11, t0, _masm)); +++} +++ +++void TemplateTable::wide_astore() { +++ transition(vtos, vtos); +++ __ pop_ptr(x10); +++ locals_index_wide(x11); +++ __ sd(x10, aaddress(x11, t0, _masm)); +++} +++ +++void TemplateTable::iastore() { +++ transition(itos, vtos); +++ __ pop_i(x11); +++ __ pop_ptr(x13); +++ // x10: value +++ // x11: index +++ // x13: array +++ index_check(x13, x11); // prefer index in x11 +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2); +++ __ shadd(t0, x11, x13, t0, 2); +++ __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, Address(t0, 0), x10, noreg, noreg); +++} +++ +++void TemplateTable::lastore() { +++ transition(ltos, vtos); +++ __ pop_i(x11); +++ __ pop_ptr(x13); +++ // x10: value +++ // x11: index +++ // x13: array +++ index_check(x13, x11); // prefer index in x11 +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3); +++ __ shadd(t0, x11, x13, t0, 3); +++ __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, Address(t0, 0), x10, noreg, noreg); +++} +++ +++void TemplateTable::fastore() { +++ transition(ftos, vtos); +++ __ pop_i(x11); +++ __ pop_ptr(x13); +++ // f10: value +++ // x11: index +++ // x13: array +++ index_check(x13, x11); // prefer index in x11 +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2); +++ __ shadd(t0, x11, x13, t0, 2); +++ __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(t0, 0), noreg /* ftos */, noreg, noreg); +++} +++ +++void TemplateTable::dastore() { +++ transition(dtos, vtos); +++ __ pop_i(x11); +++ __ pop_ptr(x13); +++ // f10: value +++ // x11: index +++ // x13: array +++ index_check(x13, x11); // prefer index in x11 +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3); +++ __ shadd(t0, x11, x13, t0, 3); +++ __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(t0, 0), noreg /* dtos */, noreg, noreg); +++} +++ +++void TemplateTable::aastore() { +++ Label is_null, ok_is_subtype, done; +++ transition(vtos, vtos); +++ // stack: ..., array, index, value +++ __ ld(x10, at_tos()); // value +++ __ ld(x12, at_tos_p1()); // index +++ __ ld(x13, at_tos_p2()); // array +++ +++ index_check(x13, x12); // kills x11 +++ __ add(x14, x12, 
arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop); +++ __ shadd(x14, x14, x13, x14, LogBytesPerHeapOop); +++ +++ Address element_address(x14, 0); +++ +++ // do array store check - check for NULL value first +++ __ beqz(x10, is_null); +++ +++ // Move subklass into x11 +++ __ load_klass(x11, x10); +++ // Move superklass into x10 +++ __ load_klass(x10, x13); +++ __ ld(x10, Address(x10, +++ ObjArrayKlass::element_klass_offset())); +++ // Compress array + index * oopSize + 12 into a single register. Frees x12. +++ +++ // Generate subtype check. Blows x12, x15 +++ // Superklass in x10. Subklass in x11. +++ __ gen_subtype_check(x11, ok_is_subtype); //todo +++ +++ // Come here on failure +++ // object is at TOS +++ __ j(Interpreter::_throw_ArrayStoreException_entry); +++ +++ // Come here on success +++ __ bind(ok_is_subtype); +++ +++ // Get the value we will store +++ __ ld(x10, at_tos()); +++ // Now store using the appropriate barrier +++ do_oop_store(_masm, element_address, x10, IS_ARRAY); +++ __ j(done); +++ +++ // Have a NULL in x10, x13=array, x12=index. Store NULL at ary[idx] +++ __ bind(is_null); +++ __ profile_null_seen(x12); +++ +++ // Store a NULL +++ do_oop_store(_masm, element_address, noreg, IS_ARRAY); +++ +++ // Pop stack arguments +++ __ bind(done); +++ __ add(esp, esp, 3 * Interpreter::stackElementSize); +++ +++} +++ +++void TemplateTable::bastore() +++{ +++ transition(itos, vtos); +++ __ pop_i(x11); +++ __ pop_ptr(x13); +++ // x10: value +++ // x11: index +++ // x13: array +++ index_check(x13, x11); // prefer index in x11 +++ +++ // Need to check whether array is boolean or byte +++ // since both types share the bastore bytecode. +++ __ load_klass(x12, x13); +++ __ lwu(x12, Address(x12, Klass::layout_helper_offset())); +++ Label L_skip; +++ __ test_bit(t0, x12, exact_log2(Klass::layout_helper_boolean_diffbit())); +++ __ beqz(t0, L_skip); +++ __ andi(x10, x10, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1 +++ __ bind(L_skip); +++ +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0); +++ +++ __ add(x11, x13, x11); +++ __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, Address(x11, 0), x10, noreg, noreg); +++} +++ +++void TemplateTable::castore() +++{ +++ transition(itos, vtos); +++ __ pop_i(x11); +++ __ pop_ptr(x13); +++ // x10: value +++ // x11: index +++ // x13: array +++ index_check(x13, x11); // prefer index in x11 +++ __ add(x11, x11, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1); +++ __ shadd(t0, x11, x13, t0, 1); +++ __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, Address(t0, 0), x10, noreg, noreg); +++} +++ +++void TemplateTable::sastore() +++{ +++ castore(); +++} +++ +++void TemplateTable::istore(int n) +++{ +++ transition(itos, vtos); +++ __ sd(x10, iaddress(n)); +++} +++ +++void TemplateTable::lstore(int n) +++{ +++ transition(ltos, vtos); +++ __ sd(x10, laddress(n)); +++} +++ +++void TemplateTable::fstore(int n) +++{ +++ transition(ftos, vtos); +++ __ fsw(f10, faddress(n)); +++} +++ +++void TemplateTable::dstore(int n) +++{ +++ transition(dtos, vtos); +++ __ fsd(f10, daddress(n)); +++} +++ +++void TemplateTable::astore(int n) +++{ +++ transition(vtos, vtos); +++ __ pop_ptr(x10); +++ __ sd(x10, iaddress(n)); +++} +++ +++void TemplateTable::pop() +++{ +++ transition(vtos, vtos); +++ __ addi(esp, esp, Interpreter::stackElementSize); +++} +++ +++void TemplateTable::pop2() +++{ +++ transition(vtos, vtos); +++ __ addi(esp, esp, 2 * Interpreter::stackElementSize); +++} +++ +++void TemplateTable::dup() +++{ +++ transition(vtos, 
vtos); +++ __ ld(x10, Address(esp, 0)); +++ __ push_reg(x10); +++ // stack: ..., a, a +++} +++ +++void TemplateTable::dup_x1() +++{ +++ transition(vtos, vtos); +++ // stack: ..., a, b +++ __ ld(x10, at_tos()); // load b +++ __ ld(x12, at_tos_p1()); // load a +++ __ sd(x10, at_tos_p1()); // store b +++ __ sd(x12, at_tos()); // store a +++ __ push_reg(x10); // push b +++ // stack: ..., b, a, b +++} +++ +++void TemplateTable::dup_x2() +++{ +++ transition(vtos, vtos); +++ // stack: ..., a, b, c +++ __ ld(x10, at_tos()); // load c +++ __ ld(x12, at_tos_p2()); // load a +++ __ sd(x10, at_tos_p2()); // store c in a +++ __ push_reg(x10); // push c +++ // stack: ..., c, b, c, c +++ __ ld(x10, at_tos_p2()); // load b +++ __ sd(x12, at_tos_p2()); // store a in b +++ // stack: ..., c, a, c, c +++ __ sd(x10, at_tos_p1()); // store b in c +++ // stack: ..., c, a, b, c +++} +++ +++void TemplateTable::dup2() +++{ +++ transition(vtos, vtos); +++ // stack: ..., a, b +++ __ ld(x10, at_tos_p1()); // load a +++ __ push_reg(x10); // push a +++ __ ld(x10, at_tos_p1()); // load b +++ __ push_reg(x10); // push b +++ // stack: ..., a, b, a, b +++} +++ +++void TemplateTable::dup2_x1() +++{ +++ transition(vtos, vtos); +++ // stack: ..., a, b, c +++ __ ld(x12, at_tos()); // load c +++ __ ld(x10, at_tos_p1()); // load b +++ __ push_reg(x10); // push b +++ __ push_reg(x12); // push c +++ // stack: ..., a, b, c, b, c +++ __ sd(x12, at_tos_p3()); // store c in b +++ // stack: ..., a, c, c, b, c +++ __ ld(x12, at_tos_p4()); // load a +++ __ sd(x12, at_tos_p2()); // store a in 2nd c +++ // stack: ..., a, c, a, b, c +++ __ sd(x10, at_tos_p4()); // store b in a +++ // stack: ..., b, c, a, b, c +++} +++ +++void TemplateTable::dup2_x2() +++{ +++ transition(vtos, vtos); +++ // stack: ..., a, b, c, d +++ __ ld(x12, at_tos()); // load d +++ __ ld(x10, at_tos_p1()); // load c +++ __ push_reg(x10); // push c +++ __ push_reg(x12); // push d +++ // stack: ..., a, b, c, d, c, d +++ __ ld(x10, at_tos_p4()); // load b +++ __ sd(x10, at_tos_p2()); // store b in d +++ __ sd(x12, at_tos_p4()); // store d in b +++ // stack: ..., a, d, c, b, c, d +++ __ ld(x12, at_tos_p5()); // load a +++ __ ld(x10, at_tos_p3()); // load c +++ __ sd(x12, at_tos_p3()); // store a in c +++ __ sd(x10, at_tos_p5()); // store c in a +++ // stack: ..., c, d, a, b, c, d +++} +++ +++void TemplateTable::swap() +++{ +++ transition(vtos, vtos); +++ // stack: ..., a, b +++ __ ld(x12, at_tos_p1()); // load a +++ __ ld(x10, at_tos()); // load b +++ __ sd(x12, at_tos()); // store a in b +++ __ sd(x10, at_tos_p1()); // store b in a +++ // stack: ..., b, a +++} +++ +++void TemplateTable::iop2(Operation op) +++{ +++ transition(itos, itos); +++ // x10 <== x11 op x10 +++ __ pop_i(x11); +++ switch (op) { +++ case add : __ addw(x10, x11, x10); break; +++ case sub : __ subw(x10, x11, x10); break; +++ case mul : __ mulw(x10, x11, x10); break; +++ case _and : __ andrw(x10, x11, x10); break; +++ case _or : __ orrw(x10, x11, x10); break; +++ case _xor : __ xorrw(x10, x11, x10); break; +++ case shl : __ sllw(x10, x11, x10); break; +++ case shr : __ sraw(x10, x11, x10); break; +++ case ushr : __ srlw(x10, x11, x10); break; +++ default : ShouldNotReachHere(); +++ } +++} +++ +++void TemplateTable::lop2(Operation op) +++{ +++ transition(ltos, ltos); +++ // x10 <== x11 op x10 +++ __ pop_l(x11); +++ switch (op) { +++ case add : __ add(x10, x11, x10); break; +++ case sub : __ sub(x10, x11, x10); break; +++ case mul : __ mul(x10, x11, x10); break; +++ case _and : __ andr(x10, x11, x10); break; 
+++ case _or : __ orr(x10, x11, x10); break; +++ case _xor : __ xorr(x10, x11, x10); break; +++ default : ShouldNotReachHere(); +++ } +++} +++ +++void TemplateTable::idiv() +++{ +++ transition(itos, itos); +++ // explicitly check for div0 +++ Label no_div0; +++ __ bnez(x10, no_div0); +++ __ mv(t0, Interpreter::_throw_ArithmeticException_entry); +++ __ jr(t0); +++ __ bind(no_div0); +++ __ pop_i(x11); +++ // x10 <== x11 idiv x10 +++ __ corrected_idivl(x10, x11, x10, /* want_remainder */ false); +++} +++ +++void TemplateTable::irem() +++{ +++ transition(itos, itos); +++ // explicitly check for div0 +++ Label no_div0; +++ __ bnez(x10, no_div0); +++ __ mv(t0, Interpreter::_throw_ArithmeticException_entry); +++ __ jr(t0); +++ __ bind(no_div0); +++ __ pop_i(x11); +++ // x10 <== x11 irem x10 +++ __ corrected_idivl(x10, x11, x10, /* want_remainder */ true); +++} +++ +++void TemplateTable::lmul() +++{ +++ transition(ltos, ltos); +++ __ pop_l(x11); +++ __ mul(x10, x10, x11); +++} +++ +++void TemplateTable::ldiv() +++{ +++ transition(ltos, ltos); +++ // explicitly check for div0 +++ Label no_div0; +++ __ bnez(x10, no_div0); +++ __ mv(t0, Interpreter::_throw_ArithmeticException_entry); +++ __ jr(t0); +++ __ bind(no_div0); +++ __ pop_l(x11); +++ // x10 <== x11 ldiv x10 +++ __ corrected_idivq(x10, x11, x10, /* want_remainder */ false); +++} +++ +++void TemplateTable::lrem() +++{ +++ transition(ltos, ltos); +++ // explicitly check for div0 +++ Label no_div0; +++ __ bnez(x10, no_div0); +++ __ mv(t0, Interpreter::_throw_ArithmeticException_entry); +++ __ jr(t0); +++ __ bind(no_div0); +++ __ pop_l(x11); +++ // x10 <== x11 lrem x10 +++ __ corrected_idivq(x10, x11, x10, /* want_remainder */ true); +++} +++ +++void TemplateTable::lshl() +++{ +++ transition(itos, ltos); +++ // shift count is in x10 +++ __ pop_l(x11); +++ __ sll(x10, x11, x10); +++} +++ +++void TemplateTable::lshr() +++{ +++ transition(itos, ltos); +++ // shift count is in x10 +++ __ pop_l(x11); +++ __ sra(x10, x11, x10); +++} +++ +++void TemplateTable::lushr() +++{ +++ transition(itos, ltos); +++ // shift count is in x10 +++ __ pop_l(x11); +++ __ srl(x10, x11, x10); +++} +++ +++void TemplateTable::fop2(Operation op) +++{ +++ transition(ftos, ftos); +++ switch (op) { +++ case add: +++ __ pop_f(f11); +++ __ fadd_s(f10, f11, f10); +++ break; +++ case sub: +++ __ pop_f(f11); +++ __ fsub_s(f10, f11, f10); +++ break; +++ case mul: +++ __ pop_f(f11); +++ __ fmul_s(f10, f11, f10); +++ break; +++ case div: +++ __ pop_f(f11); +++ __ fdiv_s(f10, f11, f10); +++ break; +++ case rem: +++ __ fmv_s(f11, f10); +++ __ pop_f(f10); +++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem)); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++} +++ +++void TemplateTable::dop2(Operation op) +++{ +++ transition(dtos, dtos); +++ switch (op) { +++ case add: +++ __ pop_d(f11); +++ __ fadd_d(f10, f11, f10); +++ break; +++ case sub: +++ __ pop_d(f11); +++ __ fsub_d(f10, f11, f10); +++ break; +++ case mul: +++ __ pop_d(f11); +++ __ fmul_d(f10, f11, f10); +++ break; +++ case div: +++ __ pop_d(f11); +++ __ fdiv_d(f10, f11, f10); +++ break; +++ case rem: +++ __ fmv_d(f11, f10); +++ __ pop_d(f10); +++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem)); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++} +++ +++void TemplateTable::ineg() +++{ +++ transition(itos, itos); +++ __ negw(x10, x10); +++} +++ +++void TemplateTable::lneg() +++{ +++ transition(ltos, ltos); +++ __ neg(x10, x10); +++} +++ +++void TemplateTable::fneg() +++{ +++ 
transition(ftos, ftos); +++ __ fneg_s(f10, f10); +++} +++ +++void TemplateTable::dneg() +++{ +++ transition(dtos, dtos); +++ __ fneg_d(f10, f10); +++} +++ +++void TemplateTable::iinc() +++{ +++ transition(vtos, vtos); +++ __ load_signed_byte(x11, at_bcp(2)); // get constant +++ locals_index(x12); +++ __ ld(x10, iaddress(x12, x10, _masm)); +++ __ addw(x10, x10, x11); +++ __ sd(x10, iaddress(x12, t0, _masm)); +++} +++ +++void TemplateTable::wide_iinc() +++{ +++ transition(vtos, vtos); +++ __ lwu(x11, at_bcp(2)); // get constant and index +++ __ revb_h_w_u(x11, x11); // reverse bytes in half-word (32bit) and zero-extend +++ __ zero_extend(x12, x11, 16); +++ __ neg(x12, x12); +++ __ slli(x11, x11, 32); +++ __ srai(x11, x11, 48); +++ __ ld(x10, iaddress(x12, t0, _masm)); +++ __ addw(x10, x10, x11); +++ __ sd(x10, iaddress(x12, t0, _masm)); +++} +++ +++void TemplateTable::convert() +++{ +++ // Checking +++#ifdef ASSERT +++ { +++ TosState tos_in = ilgl; +++ TosState tos_out = ilgl; +++ switch (bytecode()) { +++ case Bytecodes::_i2l: // fall through +++ case Bytecodes::_i2f: // fall through +++ case Bytecodes::_i2d: // fall through +++ case Bytecodes::_i2b: // fall through +++ case Bytecodes::_i2c: // fall through +++ case Bytecodes::_i2s: tos_in = itos; break; +++ case Bytecodes::_l2i: // fall through +++ case Bytecodes::_l2f: // fall through +++ case Bytecodes::_l2d: tos_in = ltos; break; +++ case Bytecodes::_f2i: // fall through +++ case Bytecodes::_f2l: // fall through +++ case Bytecodes::_f2d: tos_in = ftos; break; +++ case Bytecodes::_d2i: // fall through +++ case Bytecodes::_d2l: // fall through +++ case Bytecodes::_d2f: tos_in = dtos; break; +++ default : ShouldNotReachHere(); +++ } +++ switch (bytecode()) { +++ case Bytecodes::_l2i: // fall through +++ case Bytecodes::_f2i: // fall through +++ case Bytecodes::_d2i: // fall through +++ case Bytecodes::_i2b: // fall through +++ case Bytecodes::_i2c: // fall through +++ case Bytecodes::_i2s: tos_out = itos; break; +++ case Bytecodes::_i2l: // fall through +++ case Bytecodes::_f2l: // fall through +++ case Bytecodes::_d2l: tos_out = ltos; break; +++ case Bytecodes::_i2f: // fall through +++ case Bytecodes::_l2f: // fall through +++ case Bytecodes::_d2f: tos_out = ftos; break; +++ case Bytecodes::_i2d: // fall through +++ case Bytecodes::_l2d: // fall through +++ case Bytecodes::_f2d: tos_out = dtos; break; +++ default : ShouldNotReachHere(); +++ } +++ transition(tos_in, tos_out); +++ } +++#endif // ASSERT +++ +++ // Conversion +++ switch (bytecode()) { +++ case Bytecodes::_i2l: +++ __ sign_extend(x10, x10, 32); +++ break; +++ case Bytecodes::_i2f: +++ __ fcvt_s_w(f10, x10); +++ break; +++ case Bytecodes::_i2d: +++ __ fcvt_d_w(f10, x10); +++ break; +++ case Bytecodes::_i2b: +++ __ sign_extend(x10, x10, 8); +++ break; +++ case Bytecodes::_i2c: +++ __ zero_extend(x10, x10, 16); +++ break; +++ case Bytecodes::_i2s: +++ __ sign_extend(x10, x10, 16); +++ break; +++ case Bytecodes::_l2i: +++ __ sign_extend(x10, x10, 32); +++ break; +++ case Bytecodes::_l2f: +++ __ fcvt_s_l(f10, x10); +++ break; +++ case Bytecodes::_l2d: +++ __ fcvt_d_l(f10, x10); +++ break; +++ case Bytecodes::_f2i: +++ __ fcvt_w_s_safe(x10, f10); +++ break; +++ case Bytecodes::_f2l: +++ __ fcvt_l_s_safe(x10, f10); +++ break; +++ case Bytecodes::_f2d: +++ __ fcvt_d_s(f10, f10); +++ break; +++ case Bytecodes::_d2i: +++ __ fcvt_w_d_safe(x10, f10); +++ break; +++ case Bytecodes::_d2l: +++ __ fcvt_l_d_safe(x10, f10); +++ break; +++ case Bytecodes::_d2f: +++ __ fcvt_s_d(f10, f10); +++ 
break; +++ default: +++ ShouldNotReachHere(); +++ } +++} +++ +++void TemplateTable::lcmp() +++{ +++ transition(ltos, itos); +++ __ pop_l(x11); +++ __ cmp_l2i(t0, x11, x10); +++ __ mv(x10, t0); +++} +++ +++void TemplateTable::float_cmp(bool is_float, int unordered_result) +++{ +++ // For instruction feq, flt and fle, the result is 0 if either operand is NaN +++ if (is_float) { +++ __ pop_f(f11); +++ // if unordered_result < 0: +++ // we want -1 for unordered or less than, 0 for equal and 1 for +++ // greater than. +++ // else: +++ // we want -1 for less than, 0 for equal and 1 for unordered or +++ // greater than. +++ // f11 primary, f10 secondary +++ __ float_compare(x10, f11, f10, unordered_result); +++ } else { +++ __ pop_d(f11); +++ // if unordered_result < 0: +++ // we want -1 for unordered or less than, 0 for equal and 1 for +++ // greater than. +++ // else: +++ // we want -1 for less than, 0 for equal and 1 for unordered or +++ // greater than. +++ // f11 primary, f10 secondary +++ __ double_compare(x10, f11, f10, unordered_result); +++ } +++} +++ +++void TemplateTable::branch(bool is_jsr, bool is_wide) +++{ +++ __ profile_taken_branch(x10, x11); +++ const ByteSize be_offset = MethodCounters::backedge_counter_offset() + +++ InvocationCounter::counter_offset(); +++ const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + +++ InvocationCounter::counter_offset(); +++ +++ // load branch displacement +++ if (!is_wide) { +++ __ lhu(x12, at_bcp(1)); +++ __ revb_h_h(x12, x12); // reverse bytes in half-word and sign-extend +++ } else { +++ __ lwu(x12, at_bcp(1)); +++ __ revb_w_w(x12, x12); // reverse bytes in word and sign-extend +++ } +++ +++ // Handle all the JSR stuff here, then exit. +++ // It's much shorter and cleaner than intermingling with the non-JSR +++ // normal-branch stuff occurring below. +++ +++ if (is_jsr) { +++ // compute return address as bci +++ __ ld(t1, Address(xmethod, Method::const_offset())); +++ __ add(t1, t1, +++ in_bytes(ConstMethod::codes_offset()) - (is_wide ? 
5 : 3)); +++ __ sub(x11, xbcp, t1); +++ __ push_i(x11); +++ // Adjust the bcp by the 16-bit displacement in x12 +++ __ add(xbcp, xbcp, x12); +++ __ load_unsigned_byte(t0, Address(xbcp, 0)); +++ // load the next target bytecode into t0, it is the argument of dispatch_only +++ __ dispatch_only(vtos, /*generate_poll*/true); +++ return; +++ } +++ +++ // Normal (non-jsr) branch handling +++ +++ // Adjust the bcp by the displacement in x12 +++ __ add(xbcp, xbcp, x12); +++ +++ assert(UseLoopCounter || !UseOnStackReplacement, +++ "on-stack-replacement requires loop counters"); +++ Label backedge_counter_overflow; +++ Label dispatch; +++ if (UseLoopCounter) { +++ // increment backedge counter for backward branches +++ // x10: MDO +++ // x11: MDO bumped taken-count +++ // x12: target offset +++ __ bgtz(x12, dispatch); // count only if backward branch +++ +++ // check if MethodCounters exists +++ Label has_counters; +++ __ ld(t0, Address(xmethod, Method::method_counters_offset())); +++ __ bnez(t0, has_counters); +++ __ push_reg(x10); +++ __ push_reg(x11); +++ __ push_reg(x12); +++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, +++ InterpreterRuntime::build_method_counters), xmethod); +++ __ pop_reg(x12); +++ __ pop_reg(x11); +++ __ pop_reg(x10); +++ __ ld(t0, Address(xmethod, Method::method_counters_offset())); +++ __ beqz(t0, dispatch); // No MethodCounters allocated, OutOfMemory +++ __ bind(has_counters); +++ +++ Label no_mdo; +++ int increment = InvocationCounter::count_increment; +++ if (ProfileInterpreter) { +++ // Are we profiling? +++ __ ld(x11, Address(xmethod, in_bytes(Method::method_data_offset()))); +++ __ beqz(x11, no_mdo); +++ // Increment the MDO backedge counter +++ const Address mdo_backedge_counter(x11, in_bytes(MethodData::backedge_counter_offset()) + +++ in_bytes(InvocationCounter::counter_offset())); +++ const Address mask(x11, in_bytes(MethodData::backedge_mask_offset())); +++ __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, +++ x10, t0, false, +++ UseOnStackReplacement ? &backedge_counter_overflow : &dispatch); +++ __ j(dispatch); +++ } +++ __ bind(no_mdo); +++ // Increment backedge counter in MethodCounters* +++ __ ld(t0, Address(xmethod, Method::method_counters_offset())); +++ const Address mask(t0, in_bytes(MethodCounters::backedge_mask_offset())); +++ __ increment_mask_and_jump(Address(t0, be_offset), increment, mask, +++ x10, t1, false, +++ UseOnStackReplacement ? 
&backedge_counter_overflow : &dispatch); +++ __ bind(dispatch); +++ } +++ +++ // Pre-load the next target bytecode into t0 +++ __ load_unsigned_byte(t0, Address(xbcp, 0)); +++ +++ // continue with the bytecode @ target +++ // t0: target bytecode +++ // xbcp: target bcp +++ __ dispatch_only(vtos, /*generate_poll*/true); +++ +++ if (UseLoopCounter && UseOnStackReplacement) { +++ // invocation counter overflow +++ __ bind(backedge_counter_overflow); +++ __ neg(x12, x12); +++ __ add(x12, x12, xbcp); // branch xbcp +++ // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp) +++ __ call_VM(noreg, +++ CAST_FROM_FN_PTR(address, +++ InterpreterRuntime::frequency_counter_overflow), +++ x12); +++ __ load_unsigned_byte(x11, Address(xbcp, 0)); // restore target bytecode +++ +++ // x10: osr nmethod (osr ok) or NULL (osr not possible) +++ // w11: target bytecode +++ // x12: temporary +++ __ beqz(x10, dispatch); // test result -- no osr if null +++ // nmethod may have been invalidated (VM may block upon call_VM return) +++ __ lbu(x12, Address(x10, nmethod::state_offset())); +++ if (nmethod::in_use != 0) { +++ __ sub(x12, x12, nmethod::in_use); +++ } +++ __ bnez(x12, dispatch); +++ +++ // We have the address of an on stack replacement routine in x10 +++ // We need to prepare to execute the OSR method. First we must +++ // migrate the locals and monitors off of the stack. +++ +++ __ mv(x9, x10); // save the nmethod +++ +++ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin)); +++ +++ // x10 is OSR buffer, move it to expected parameter location +++ __ mv(j_rarg0, x10); +++ +++ // remove activation +++ // get sender esp +++ __ ld(esp, +++ Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); +++ // remove frame anchor +++ __ leave(); +++ // Ensure compiled code always sees stack at proper alignment +++ __ andi(sp, esp, -16); +++ +++ // and begin the OSR nmethod +++ __ ld(t0, Address(x9, nmethod::osr_entry_point_offset())); +++ __ jr(t0); +++ } +++} +++ +++void TemplateTable::if_0cmp(Condition cc) +++{ +++ transition(itos, vtos); +++ // assume branch is more often taken than not (loops use backward branches) +++ Label not_taken; +++ +++ __ sign_extend(x10, x10, 32); +++ switch (cc) { +++ case equal: +++ __ bnez(x10, not_taken); +++ break; +++ case not_equal: +++ __ beqz(x10, not_taken); +++ break; +++ case less: +++ __ bgez(x10, not_taken); +++ break; +++ case less_equal: +++ __ bgtz(x10, not_taken); +++ break; +++ case greater: +++ __ blez(x10, not_taken); +++ break; +++ case greater_equal: +++ __ bltz(x10, not_taken); +++ break; +++ default: +++ break; +++ } +++ +++ branch(false, false); +++ __ bind(not_taken); +++ __ profile_not_taken_branch(x10); +++} +++ +++void TemplateTable::if_icmp(Condition cc) +++{ +++ transition(itos, vtos); +++ // assume branch is more often taken than not (loops use backward branches) +++ Label not_taken; +++ __ pop_i(x11); +++ __ sign_extend(x10, x10, 32); +++ switch (cc) { +++ case equal: +++ __ bne(x11, x10, not_taken); +++ break; +++ case not_equal: +++ __ beq(x11, x10, not_taken); +++ break; +++ case less: +++ __ bge(x11, x10, not_taken); +++ break; +++ case less_equal: +++ __ bgt(x11, x10, not_taken); +++ break; +++ case greater: +++ __ ble(x11, x10, not_taken); +++ break; +++ case greater_equal: +++ __ blt(x11, x10, not_taken); +++ break; +++ default: +++ break; +++ } +++ +++ branch(false, false); +++ __ bind(not_taken); +++ __ profile_not_taken_branch(x10); +++} +++ +++void TemplateTable::if_nullcmp(Condition cc) 
+++{ +++ transition(atos, vtos); +++ // assume branch is more often taken than not (loops use backward branches) +++ Label not_taken; +++ if (cc == equal) { +++ __ bnez(x10, not_taken); +++ } else { +++ __ beqz(x10, not_taken); +++ } +++ branch(false, false); +++ __ bind(not_taken); +++ __ profile_not_taken_branch(x10); +++} +++ +++void TemplateTable::if_acmp(Condition cc) +++{ +++ transition(atos, vtos); +++ // assume branch is more often taken than not (loops use backward branches) +++ Label not_taken; +++ __ pop_ptr(x11); +++ +++ if (cc == equal) { +++ __ bne(x11, x10, not_taken); +++ } else if (cc == not_equal) { +++ __ beq(x11, x10, not_taken); +++ } +++ branch(false, false); +++ __ bind(not_taken); +++ __ profile_not_taken_branch(x10); +++} +++ +++void TemplateTable::ret() { +++ transition(vtos, vtos); +++ locals_index(x11); +++ __ ld(x11, aaddress(x11, t1, _masm)); // get return bci, compute return bcp +++ __ profile_ret(x11, x12); +++ __ ld(xbcp, Address(xmethod, Method::const_offset())); +++ __ add(xbcp, xbcp, x11); +++ __ addi(xbcp, xbcp, in_bytes(ConstMethod::codes_offset())); +++ __ dispatch_next(vtos, 0, /*generate_poll*/true); +++} +++ +++void TemplateTable::wide_ret() { +++ transition(vtos, vtos); +++ locals_index_wide(x11); +++ __ ld(x11, aaddress(x11, t0, _masm)); // get return bci, compute return bcp +++ __ profile_ret(x11, x12); +++ __ ld(xbcp, Address(xmethod, Method::const_offset())); +++ __ add(xbcp, xbcp, x11); +++ __ add(xbcp, xbcp, in_bytes(ConstMethod::codes_offset())); +++ __ dispatch_next(vtos, 0, /*generate_poll*/true); +++} +++ +++void TemplateTable::tableswitch() { +++ Label default_case, continue_execution; +++ transition(itos, vtos); +++ // align xbcp +++ __ la(x11, at_bcp(BytesPerInt)); +++ __ andi(x11, x11, -BytesPerInt); +++ // load lo & hi +++ __ lwu(x12, Address(x11, BytesPerInt)); +++ __ lwu(x13, Address(x11, 2 * BytesPerInt)); +++ __ revb_w_w(x12, x12); // reverse bytes in word (32bit) and sign-extend +++ __ revb_w_w(x13, x13); // reverse bytes in word (32bit) and sign-extend +++ // check against lo & hi +++ __ blt(x10, x12, default_case); +++ __ bgt(x10, x13, default_case); +++ // lookup dispatch offset +++ __ subw(x10, x10, x12); +++ __ shadd(x13, x10, x11, t0, 2); +++ __ lwu(x13, Address(x13, 3 * BytesPerInt)); +++ __ profile_switch_case(x10, x11, x12); +++ // continue execution +++ __ bind(continue_execution); +++ __ revb_w_w(x13, x13); // reverse bytes in word (32bit) and sign-extend +++ __ add(xbcp, xbcp, x13); +++ __ load_unsigned_byte(t0, Address(xbcp)); +++ __ dispatch_only(vtos, /*generate_poll*/true); +++ // handle default +++ __ bind(default_case); +++ __ profile_switch_default(x10); +++ __ lwu(x13, Address(x11, 0)); +++ __ j(continue_execution); +++} +++ +++void TemplateTable::lookupswitch() { +++ transition(itos, itos); +++ __ stop("lookupswitch bytecode should have been rewritten"); +++} +++ +++void TemplateTable::fast_linearswitch() { +++ transition(itos, vtos); +++ Label loop_entry, loop, found, continue_execution; +++ // bswap x10 so we can avoid bswapping the table entries +++ __ revb_w_w(x10, x10); // reverse bytes in word (32bit) and sign-extend +++ // align xbcp +++ __ la(x9, at_bcp(BytesPerInt)); // btw: should be able to get rid of +++ // this instruction (change offsets +++ // below) +++ __ andi(x9, x9, -BytesPerInt); +++ // set counter +++ __ lwu(x11, Address(x9, BytesPerInt)); +++ __ revb_w(x11, x11); +++ __ j(loop_entry); +++ // table search +++ __ bind(loop); +++ __ shadd(t0, x11, x9, t0, 3); +++ __ lw(t0, Address(t0, 2 * 
BytesPerInt)); +++ __ beq(x10, t0, found); +++ __ bind(loop_entry); +++ __ addi(x11, x11, -1); +++ __ bgez(x11, loop); +++ // default case +++ __ profile_switch_default(x10); +++ __ lwu(x13, Address(x9, 0)); +++ __ j(continue_execution); +++ // entry found -> get offset +++ __ bind(found); +++ __ shadd(t0, x11, x9, t0, 3); +++ __ lwu(x13, Address(t0, 3 * BytesPerInt)); +++ __ profile_switch_case(x11, x10, x9); +++ // continue execution +++ __ bind(continue_execution); +++ __ revb_w_w(x13, x13); // reverse bytes in word (32bit) and sign-extend +++ __ add(xbcp, xbcp, x13); +++ __ lbu(t0, Address(xbcp, 0)); +++ __ dispatch_only(vtos, /*generate_poll*/true); +++} +++ +++void TemplateTable::fast_binaryswitch() { +++ transition(itos, vtos); +++ // Implementation using the following core algorithm: +++ // +++ // int binary_search(int key, LookupswitchPair* array, int n) +++ // binary_search start: +++ // #Binary search according to "Methodik des Programmierens" by +++ // # Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. +++ // int i = 0; +++ // int j = n; +++ // while (i + 1 < j) do +++ // # invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) +++ // # with Q: for all i: 0 <= i < n: key < a[i] +++ // # where a stands for the array and assuming that the (inexisting) +++ // # element a[n] is infinitely big. +++ // int h = (i + j) >> 1 +++ // # i < h < j +++ // if (key < array[h].fast_match()) +++ // then [j = h] +++ // else [i = h] +++ // end +++ // # R: a[i] <= key < a[i+1] or Q +++ // # (i.e., if key is within array, i is the correct index) +++ // return i +++ // binary_search end +++ +++ +++ // Register allocation +++ const Register key = x10; // already set (tosca) +++ const Register array = x11; +++ const Register i = x12; +++ const Register j = x13; +++ const Register h = x14; +++ const Register temp = x15; +++ +++ // Find array start +++ __ la(array, at_bcp(3 * BytesPerInt)); // btw: should be able to +++ // get rid of this +++ // instruction (change +++ // offsets below) +++ __ andi(array, array, -BytesPerInt); +++ +++ // Initialize i & j +++ __ mv(i, zr); // i = 0 +++ __ lwu(j, Address(array, -BytesPerInt)); // j = length(array) +++ +++ // Convert j into native byteordering +++ __ revb_w(j, j); +++ +++ // And start +++ Label entry; +++ __ j(entry); +++ +++ // binary search loop +++ { +++ Label loop; +++ __ bind(loop); +++ __ addw(h, i, j); // h = i + j +++ __ srliw(h, h, 1); // h = (i + j) >> 1 +++ // if [key < array[h].fast_match()] +++ // then [j = h] +++ // else [i = h] +++ // Convert array[h].match to native byte-ordering before compare +++ __ shadd(temp, h, array, temp, 3); +++ __ ld(temp, Address(temp, 0)); +++ __ revb_w_w(temp, temp); // reverse bytes in word (32bit) and sign-extend +++ +++ Label L_done, L_greater; +++ __ bge(key, temp, L_greater); +++ // if [key < array[h].fast_match()] then j = h +++ __ mv(j, h); +++ __ j(L_done); +++ __ bind(L_greater); +++ // if [key >= array[h].fast_match()] then i = h +++ __ mv(i, h); +++ __ bind(L_done); +++ +++ // while [i + 1 < j] +++ __ bind(entry); +++ __ addiw(h, i, 1); // i + 1 +++ __ blt(h, j, loop); // i + 1 < j +++ } +++ +++ // end of binary search, result index is i (must check again!) 
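+++ // Editorial worked example (values are illustrative): for match keys
+++ // {3, 7, 9} (n == 3) and key == 7: (i, j) = (0, 3) -> h = 1, and
+++ // key < a[1] is false, so i = 1; next h = 2 and key < a[2], so j = 2;
+++ // now i + 1 == j ends the loop with i == 1, and a[1] == key below.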
+++ Label default_case;
+++ // Convert array[i].match to native byte-ordering before compare
+++ __ shadd(temp, i, array, temp, 3);
+++ __ ld(temp, Address(temp, 0));
+++ __ revb_w_w(temp, temp); // reverse bytes in word (32bit) and sign-extend
+++ __ bne(key, temp, default_case);
+++
+++ // entry found -> j = offset
+++ __ shadd(temp, i, array, temp, 3);
+++ __ lwu(j, Address(temp, BytesPerInt));
+++ __ profile_switch_case(i, key, array);
+++ __ revb_w_w(j, j); // reverse bytes in word (32bit) and sign-extend
+++
+++ __ add(temp, xbcp, j);
+++ __ load_unsigned_byte(t0, Address(temp, 0));
+++
+++ __ add(xbcp, xbcp, j);
+++ __ la(xbcp, Address(xbcp, 0));
+++ __ dispatch_only(vtos, /*generate_poll*/true);
+++
+++ // default case -> j = default offset
+++ __ bind(default_case);
+++ __ profile_switch_default(i);
+++ __ lwu(j, Address(array, -2 * BytesPerInt));
+++ __ revb_w_w(j, j); // reverse bytes in word (32bit) and sign-extend
+++
+++ __ add(temp, xbcp, j);
+++ __ load_unsigned_byte(t0, Address(temp, 0));
+++
+++ __ add(xbcp, xbcp, j);
+++ __ la(xbcp, Address(xbcp, 0));
+++ __ dispatch_only(vtos, /*generate_poll*/true);
+++}
+++
+++void TemplateTable::_return(TosState state)
+++{
+++ transition(state, state);
+++ assert(_desc->calls_vm(),
+++ "inconsistent calls_vm information"); // call in remove_activation
+++
+++ if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
+++ assert(state == vtos, "only valid state");
+++
+++ __ ld(c_rarg1, aaddress(0));
+++ __ load_klass(x13, c_rarg1);
+++ __ lwu(x13, Address(x13, Klass::access_flags_offset()));
+++ Label skip_register_finalizer;
+++ __ test_bit(t0, x13, exact_log2(JVM_ACC_HAS_FINALIZER));
+++ __ beqz(t0, skip_register_finalizer);
+++
+++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
+++
+++ __ bind(skip_register_finalizer);
+++ }
+++
+++ // Issue a StoreStore barrier after all stores but before return
+++ // from any constructor for any class with a final field. We don't
+++ // know if this is a finalizer, so we always do so.
+++ if (_desc->bytecode() == Bytecodes::_return) {
+++ __ membar(MacroAssembler::StoreStore);
+++ }
+++
+++ // Narrow result if state is itos but result type is smaller.
+++ // Need to narrow in the return bytecode rather than in generate_return_entry
+++ // since compiled code callers expect the result to already be narrowed.
+++ if (state == itos) {
+++ __ narrow(x10);
+++ }
+++
+++ __ remove_activation(state);
+++ __ ret();
+++}
+++
+++
+++// ----------------------------------------------------------------------------
+++// Volatile variables demand their effects be made known to all CPUs
+++// in order. Store buffers on most chips allow reads & writes to
+++// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
+++// without some kind of memory barrier (i.e., it's not sufficient that
+++// the interpreter does not reorder volatile references, the hardware
+++// also must not reorder them).
+++//
+++// According to the new Java Memory Model (JMM):
+++// (1) All volatiles are serialized with respect to each other. ALSO reads &
+++// writes act as acquire & release, so:
+++// (2) A read cannot let unrelated NON-volatile memory refs that
+++// happen after the read float up to before the read. It's OK for
+++// non-volatile memory refs that happen before the volatile read to
+++// float down below it.
+++// (3) Similarly, a volatile write cannot let unrelated NON-volatile
+++// memory refs that happen BEFORE the write float down to after the
+++// write. It's OK for non-volatile memory refs that happen after the
+++// volatile write to float up before it.
+++//
+++// We only put in barriers around volatile refs (they are expensive),
+++// not _between_ memory refs (that would require us to track the
+++// flavor of the previous memory refs). Requirements (2) and (3)
+++// require some barriers before volatile stores and after volatile
+++// loads. These nearly cover requirement (1) but miss the
+++// volatile-store-volatile-load case. This final case is placed after
+++// volatile-stores although it could just as well go before
+++// volatile-loads.
+++
+++void TemplateTable::resolve_cache_and_index(int byte_no,
+++ Register Rcache,
+++ Register index,
+++ size_t index_size) {
+++ const Register temp = x9;
+++ assert_different_registers(Rcache, index, temp);
+++
+++ Label resolved, clinit_barrier_slow;
+++
+++ Bytecodes::Code code = bytecode();
+++ switch (code) {
+++ case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
+++ case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
+++ default: break;
+++ }
+++
+++ assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+++ __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
+++ __ mv(t0, (int) code);
+++ __ beq(temp, t0, resolved);
+++
+++ // resolve first time through
+++ // Class initialization barrier slow path lands here as well.
+++ __ bind(clinit_barrier_slow);
+++
+++ address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
+++ __ mv(temp, (int) code);
+++ __ call_VM(noreg, entry, temp);
+++
+++ // Update registers with resolved info
+++ __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
+++ // N.B. unlike x86, Rcache is now rcpool plus the indexed offset,
+++ // so all clients of this method must be modified accordingly
+++ __ bind(resolved);
+++
+++ // Class initialization barrier for static methods
+++ if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+++ __ load_resolved_method_at_index(byte_no, temp, Rcache);
+++ __ load_method_holder(temp, temp);
+++ __ clinit_barrier(temp, t0, NULL, &clinit_barrier_slow);
+++ }
+++}
+++
+++// The Rcache and index registers must be set before the call
+++// N.B. unlike x86, cache already includes the index offset
+++void TemplateTable::load_field_cp_cache_entry(Register obj,
+++ Register cache,
+++ Register index,
+++ Register off,
+++ Register flags,
+++ bool is_static = false) {
+++ assert_different_registers(cache, index, flags, off);
+++
+++ ByteSize cp_base_offset = ConstantPoolCache::base_offset();
+++ // Field offset
+++ __ ld(off, Address(cache, in_bytes(cp_base_offset +
+++ ConstantPoolCacheEntry::f2_offset())));
+++ // Flags
+++ __ lwu(flags, Address(cache, in_bytes(cp_base_offset +
+++ ConstantPoolCacheEntry::flags_offset())));
+++
+++ // klass overwrite register
+++ if (is_static) {
+++ __ ld(obj, Address(cache, in_bytes(cp_base_offset +
+++ ConstantPoolCacheEntry::f1_offset())));
+++ const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+++ __ ld(obj, Address(obj, mirror_offset));
+++ __ resolve_oop_handle(obj);
+++ }
+++}
+++
+++void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
+++ Register method,
+++ Register itable_index,
+++ Register flags,
+++ bool is_invokevirtual,
+++ bool is_invokevfinal, /*unused*/
+++ bool is_invokedynamic) {
+++ // setup registers
+++ const Register cache = t1;
+++ const Register index = x14;
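+++ // Editorial note: for invokevirtual the resolved info lives in the
+++ // f2 slot (vtable index, or the Method* for final methods), otherwise
+++ // in f1; the method_offset selection below encodes that choice.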
+++ assert_different_registers(method, flags);
+++ assert_different_registers(method, cache, index);
+++ assert_different_registers(itable_index, flags);
+++ assert_different_registers(itable_index, cache, index);
+++ // determine constant pool cache field offsets
+++ assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
+++ const int method_offset = in_bytes(ConstantPoolCache::base_offset() +
+++ (is_invokevirtual ?
+++ ConstantPoolCacheEntry::f2_offset() :
+++ ConstantPoolCacheEntry::f1_offset()));
+++ const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
+++ ConstantPoolCacheEntry::flags_offset());
+++ // access constant pool cache fields
+++ const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
+++ ConstantPoolCacheEntry::f2_offset());
+++
+++ const size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
+++ resolve_cache_and_index(byte_no, cache, index, index_size);
+++ __ ld(method, Address(cache, method_offset));
+++
+++ if (itable_index != noreg) {
+++ __ ld(itable_index, Address(cache, index_offset));
+++ }
+++ __ lwu(flags, Address(cache, flags_offset));
+++}
+++
+++// The cache and index registers are expected to be set before the call.
+++// Correct values of the cache and index registers are preserved.
+++void TemplateTable::jvmti_post_field_access(Register cache, Register index,
+++ bool is_static, bool has_tos) {
+++ // do the JVMTI work here to avoid disturbing the register state below
+++ // We use c_rarg registers here because we want to use the registers used in
+++ // the call to the VM
+++ if (JvmtiExport::can_post_field_access()) {
+++ // Check to see if a field access watch has been set before we
+++ // take the time to call into the VM.
+++ Label L1;
+++ assert_different_registers(cache, index, x10);
+++ ExternalAddress target((address) JvmtiExport::get_field_access_count_addr());
+++ __ relocate(target.rspec(), [&] {
+++ int32_t offset;
+++ __ la_patchable(t0, target, offset);
+++ __ lwu(x10, Address(t0, offset));
+++ });
+++
+++ __ beqz(x10, L1);
+++
+++ __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
+++ __ la(c_rarg2, Address(c_rarg2, in_bytes(ConstantPoolCache::base_offset())));
+++
+++ if (is_static) {
+++ __ mv(c_rarg1, zr); // NULL object reference
+++ } else {
+++ __ ld(c_rarg1, at_tos()); // get object pointer without popping it
+++ __ verify_oop(c_rarg1);
+++ }
+++ // c_rarg1: object pointer or NULL
+++ // c_rarg2: cache entry pointer
+++ // c_rarg3: jvalue object on the stack
+++ __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+++ InterpreterRuntime::post_field_access),
+++ c_rarg1, c_rarg2, c_rarg3);
+++ __ get_cache_and_index_at_bcp(cache, index, 1);
+++ __ bind(L1);
+++ }
+++}
+++
+++void TemplateTable::pop_and_check_object(Register r)
+++{
+++ __ pop_ptr(r);
+++ __ null_check(r); // for field access, must check obj.
+++ __ verify_oop(r);
+++}
+++
+++void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
+++{
+++ const Register cache = x12;
+++ const Register index = x13;
+++ const Register obj = x14;
+++ const Register off = x9;
+++ const Register flags = x10;
+++ const Register raw_flags = x16;
+++ const Register bc = x14; // uses same reg as obj, so don't mix them
+++
+++ resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
+++ jvmti_post_field_access(cache, index, is_static, false);
+++ load_field_cp_cache_entry(obj, cache, index, off, raw_flags, is_static);
+++
+++ if (!is_static) {
+++ // obj is on the stack
+++ pop_and_check_object(obj);
+++ }
+++
+++ __ add(off, obj, off);
+++ const Address field(off);
+++
+++ Label Done, notByte, notBool, notInt, notShort, notChar,
+++ notLong, notFloat, notObj, notDouble;
+++
+++ __ slli(flags, raw_flags, XLEN - (ConstantPoolCacheEntry::tos_state_shift +
+++ ConstantPoolCacheEntry::tos_state_bits));
+++ __ srli(flags, flags, XLEN - ConstantPoolCacheEntry::tos_state_bits);
+++
+++ assert(btos == 0, "change code, btos != 0");
+++ __ bnez(flags, notByte);
+++
+++ // Don't rewrite getstatic, only getfield
+++ if (is_static) {
+++ rc = may_not_rewrite;
+++ }
+++
+++ // btos
+++ __ access_load_at(T_BYTE, IN_HEAP, x10, field, noreg, noreg);
+++ __ push(btos);
+++ // Rewrite bytecode to be faster
+++ if (rc == may_rewrite) {
+++ patch_bytecode(Bytecodes::_fast_bgetfield, bc, x11);
+++ }
+++ __ j(Done);
+++
+++ __ bind(notByte);
+++ __ sub(t0, flags, (u1)ztos);
+++ __ bnez(t0, notBool);
+++
+++ // ztos (same code as btos)
+++ __ access_load_at(T_BOOLEAN, IN_HEAP, x10, field, noreg, noreg);
+++ __ push(ztos);
+++ // Rewrite bytecode to be faster
+++ if (rc == may_rewrite) {
+++ // uses btos rewriting, no truncating to t/f bit is needed for getfield
+++ patch_bytecode(Bytecodes::_fast_bgetfield, bc, x11);
+++ }
+++ __ j(Done);
+++
+++ __ bind(notBool);
+++ __ sub(t0, flags, (u1)atos);
+++ __ bnez(t0, notObj);
+++ // atos
+++ do_oop_load(_masm, field, x10, IN_HEAP);
+++ __ push(atos);
+++ if (rc == may_rewrite) {
+++ patch_bytecode(Bytecodes::_fast_agetfield, bc, x11);
+++ }
+++ __ j(Done);
+++
+++ __ bind(notObj);
+++ __ sub(t0, flags, (u1)itos);
+++ __ bnez(t0, notInt);
+++ // itos
+++ __ access_load_at(T_INT, IN_HEAP, x10, field, noreg, noreg);
+++ __ sign_extend(x10, x10, 32);
+++ __ push(itos);
+++ // Rewrite bytecode to be faster
+++ if (rc == may_rewrite) {
+++ patch_bytecode(Bytecodes::_fast_igetfield, bc, x11);
+++ }
+++ __ j(Done);
+++
+++ __ bind(notInt);
+++ __ sub(t0, flags, (u1)ctos);
+++ __ bnez(t0, notChar);
+++ // ctos
+++ __ access_load_at(T_CHAR, IN_HEAP, x10, field, noreg, noreg);
+++ __ push(ctos);
+++ // Rewrite bytecode to be faster
+++ if (rc == may_rewrite) {
+++ patch_bytecode(Bytecodes::_fast_cgetfield, bc, x11);
+++ }
+++ __ j(Done);
+++
+++ __ bind(notChar);
+++ __ sub(t0, flags, (u1)stos);
+++ __ bnez(t0, notShort);
+++ // stos
+++ __ access_load_at(T_SHORT, IN_HEAP, x10, field, noreg, noreg);
+++ __ push(stos);
+++ // Rewrite bytecode to be faster
+++ if (rc == may_rewrite) {
+++ patch_bytecode(Bytecodes::_fast_sgetfield, bc, x11);
+++ }
+++ __ j(Done);
+++
+++ __ bind(notShort);
+++ __ sub(t0, flags, (u1)ltos);
+++ __ bnez(t0, notLong);
+++ // ltos
+++ __ access_load_at(T_LONG, IN_HEAP, x10, field, noreg, noreg);
+++ __ push(ltos);
+++ // Rewrite bytecode to be faster
+++ if (rc == may_rewrite) {
+++ patch_bytecode(Bytecodes::_fast_lgetfield, bc, x11);
+++ }
+++ __ j(Done);
+++
+++ __ bind(notLong);
+++ __ sub(t0, flags, (u1)ftos);
+++ __ bnez(t0, notFloat);
+++ // ftos
+++ __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
+++ __ push(ftos);
+++ // Rewrite bytecode to be faster
+++ if (rc == may_rewrite) {
+++ patch_bytecode(Bytecodes::_fast_fgetfield, bc, x11);
+++ }
+++ __ j(Done);
+++
+++ __ bind(notFloat);
+++#ifdef ASSERT
+++ __ sub(t0, flags, (u1)dtos);
+++ __ bnez(t0, notDouble);
+++#endif
+++ // dtos
+++ __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
+++ __ push(dtos);
+++ // Rewrite bytecode to be faster
+++ if (rc == may_rewrite) {
+++ patch_bytecode(Bytecodes::_fast_dgetfield, bc, x11);
+++ }
+++#ifdef ASSERT
+++ __ j(Done);
+++
+++ __ bind(notDouble);
+++ __ stop("Bad state");
+++#endif
+++
+++ __ bind(Done);
+++
+++ Label notVolatile;
+++ __ test_bit(t0, raw_flags, ConstantPoolCacheEntry::is_volatile_shift);
+++ __ beqz(t0, notVolatile);
+++ __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
+++ __ bind(notVolatile);
+++}
+++
+++void TemplateTable::getfield(int byte_no)
+++{
+++ getfield_or_static(byte_no, false);
+++}
+++
+++void TemplateTable::nofast_getfield(int byte_no) {
+++ getfield_or_static(byte_no, false, may_not_rewrite);
+++}
+++
+++void TemplateTable::getstatic(int byte_no)
+++{
+++ getfield_or_static(byte_no, true);
+++}
+++
+++// The cache and index registers are expected to be set before the call.
+++// The function may destroy various registers, just not the cache and index registers.
+++void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
+++ transition(vtos, vtos);
+++
+++ ByteSize cp_base_offset = ConstantPoolCache::base_offset();
+++
+++ if (JvmtiExport::can_post_field_modification()) {
+++ // Check to see if a field modification watch has been set before
+++ // we take the time to call into the VM.
+++ Label L1;
+++ assert_different_registers(cache, index, x10);
+++ ExternalAddress target((address)JvmtiExport::get_field_modification_count_addr());
+++ __ relocate(target.rspec(), [&] {
+++ int32_t offset;
+++ __ la_patchable(t0, target, offset);
+++ __ lwu(x10, Address(t0, offset));
+++ });
+++ __ beqz(x10, L1);
+++
+++ __ get_cache_and_index_at_bcp(c_rarg2, t0, 1);
+++
+++ if (is_static) {
+++ // Life is simple. Null out the object pointer.
+++ __ mv(c_rarg1, zr);
+++ } else {
+++ // Life is harder. The stack holds the value on top, followed by
+++ // the object. We don't know the size of the value, though; it
+++ // could be one or two words depending on its type. As a result,
+++ // we must find the type to determine where the object is.
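+++ // Editorial illustration: a long or double jvalue occupies two stack
+++ // slots, so the object is then found at at_tos_p2() rather than the
+++ // one-word case's at_tos_p1(), as the code below distinguishes.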
+++ __ lwu(c_rarg3, Address(c_rarg2,
+++ in_bytes(cp_base_offset +
+++ ConstantPoolCacheEntry::flags_offset())));
+++ __ srli(c_rarg3, c_rarg3, ConstantPoolCacheEntry::tos_state_shift);
+++ ConstantPoolCacheEntry::verify_tos_state_shift();
+++ Label nope2, done, ok;
+++ __ ld(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
+++ __ sub(t0, c_rarg3, ltos);
+++ __ beqz(t0, ok);
+++ __ sub(t0, c_rarg3, dtos);
+++ __ bnez(t0, nope2);
+++ __ bind(ok);
+++ __ ld(c_rarg1, at_tos_p2()); // ltos (two word jvalue);
+++ __ bind(nope2);
+++ }
+++ // cache entry pointer
+++ __ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset));
+++ // object (tos)
+++ __ mv(c_rarg3, esp);
+++ // c_rarg1: object pointer set up above (NULL if static)
+++ // c_rarg2: cache entry pointer
+++ // c_rarg3: jvalue object on the stack
+++ __ call_VM(noreg,
+++ CAST_FROM_FN_PTR(address,
+++ InterpreterRuntime::post_field_modification),
+++ c_rarg1, c_rarg2, c_rarg3);
+++ __ get_cache_and_index_at_bcp(cache, index, 1);
+++ __ bind(L1);
+++ }
+++}
+++
+++void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
+++ transition(vtos, vtos);
+++
+++ const Register cache = x12;
+++ const Register index = x13;
+++ const Register obj = x12;
+++ const Register off = x9;
+++ const Register flags = x10;
+++ const Register bc = x14;
+++
+++ resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
+++ jvmti_post_field_mod(cache, index, is_static);
+++ load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
+++
+++ Label Done;
+++ __ mv(x15, flags);
+++
+++ {
+++ Label notVolatile;
+++ __ test_bit(t0, x15, ConstantPoolCacheEntry::is_volatile_shift);
+++ __ beqz(t0, notVolatile);
+++ __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
+++ __ bind(notVolatile);
+++ }
+++
+++ Label notByte, notBool, notInt, notShort, notChar,
+++ notLong, notFloat, notObj, notDouble;
+++
+++ __ slli(flags, flags, XLEN - (ConstantPoolCacheEntry::tos_state_shift +
+++ ConstantPoolCacheEntry::tos_state_bits));
+++ __ srli(flags, flags, XLEN - ConstantPoolCacheEntry::tos_state_bits);
+++
+++ assert(btos == 0, "change code, btos != 0");
+++ __ bnez(flags, notByte);
+++
+++ // Don't rewrite putstatic, only putfield
+++ if (is_static) {
+++ rc = may_not_rewrite;
+++ }
+++
+++ // btos
+++ {
+++ __ pop(btos);
+++ // field address
+++ if (!is_static) {
+++ pop_and_check_object(obj);
+++ }
+++ __ add(off, obj, off); // if static, obj from cache, else obj from stack.
+++ const Address field(off, 0); // off register used as a temporary.
+++ __ access_store_at(T_BYTE, IN_HEAP, field, x10, noreg, noreg);
+++ if (rc == may_rewrite) {
+++ patch_bytecode(Bytecodes::_fast_bputfield, bc, x11, true, byte_no);
+++ }
+++ __ j(Done);
+++ }
+++
+++ __ bind(notByte);
+++ __ sub(t0, flags, (u1)ztos);
+++ __ bnez(t0, notBool);
+++
+++ // ztos
+++ {
+++ __ pop(ztos);
+++ // field address
+++ if (!is_static) {
+++ pop_and_check_object(obj);
+++ }
+++ __ add(off, obj, off); // if static, obj from cache, else obj from stack.
+++ const Address field(off, 0); +++ __ access_store_at(T_BOOLEAN, IN_HEAP, field, x10, noreg, noreg); +++ if (rc == may_rewrite) { +++ patch_bytecode(Bytecodes::_fast_zputfield, bc, x11, true, byte_no); +++ } +++ __ j(Done); +++ } +++ +++ __ bind(notBool); +++ __ sub(t0, flags, (u1)atos); +++ __ bnez(t0, notObj); +++ +++ // atos +++ { +++ __ pop(atos); +++ // field address +++ if (!is_static) { +++ pop_and_check_object(obj); +++ } +++ __ add(off, obj, off); // if static, obj from cache, else obj from stack. +++ const Address field(off, 0); +++ // Store into the field +++ do_oop_store(_masm, field, x10, IN_HEAP); +++ if (rc == may_rewrite) { +++ patch_bytecode(Bytecodes::_fast_aputfield, bc, x11, true, byte_no); +++ } +++ __ j(Done); +++ } +++ +++ __ bind(notObj); +++ __ sub(t0, flags, (u1)itos); +++ __ bnez(t0, notInt); +++ +++ // itos +++ { +++ __ pop(itos); +++ // field address +++ if (!is_static) { +++ pop_and_check_object(obj); +++ } +++ __ add(off, obj, off); // if static, obj from cache, else obj from stack. +++ const Address field(off, 0); +++ __ access_store_at(T_INT, IN_HEAP, field, x10, noreg, noreg); +++ if (rc == may_rewrite) { +++ patch_bytecode(Bytecodes::_fast_iputfield, bc, x11, true, byte_no); +++ } +++ __ j(Done); +++ } +++ +++ __ bind(notInt); +++ __ sub(t0, flags, (u1)ctos); +++ __ bnez(t0, notChar); +++ +++ // ctos +++ { +++ __ pop(ctos); +++ // field address +++ if (!is_static) { +++ pop_and_check_object(obj); +++ } +++ __ add(off, obj, off); // if static, obj from cache, else obj from stack. +++ const Address field(off, 0); +++ __ access_store_at(T_CHAR, IN_HEAP, field, x10, noreg, noreg); +++ if (rc == may_rewrite) { +++ patch_bytecode(Bytecodes::_fast_cputfield, bc, x11, true, byte_no); +++ } +++ __ j(Done); +++ } +++ +++ __ bind(notChar); +++ __ sub(t0, flags, (u1)stos); +++ __ bnez(t0, notShort); +++ +++ // stos +++ { +++ __ pop(stos); +++ // field address +++ if (!is_static) { +++ pop_and_check_object(obj); +++ } +++ __ add(off, obj, off); // if static, obj from cache, else obj from stack. +++ const Address field(off, 0); +++ __ access_store_at(T_SHORT, IN_HEAP, field, x10, noreg, noreg); +++ if (rc == may_rewrite) { +++ patch_bytecode(Bytecodes::_fast_sputfield, bc, x11, true, byte_no); +++ } +++ __ j(Done); +++ } +++ +++ __ bind(notShort); +++ __ sub(t0, flags, (u1)ltos); +++ __ bnez(t0, notLong); +++ +++ // ltos +++ { +++ __ pop(ltos); +++ // field address +++ if (!is_static) { +++ pop_and_check_object(obj); +++ } +++ __ add(off, obj, off); // if static, obj from cache, else obj from stack. +++ const Address field(off, 0); +++ __ access_store_at(T_LONG, IN_HEAP, field, x10, noreg, noreg); +++ if (rc == may_rewrite) { +++ patch_bytecode(Bytecodes::_fast_lputfield, bc, x11, true, byte_no); +++ } +++ __ j(Done); +++ } +++ +++ __ bind(notLong); +++ __ sub(t0, flags, (u1)ftos); +++ __ bnez(t0, notFloat); +++ +++ // ftos +++ { +++ __ pop(ftos); +++ // field address +++ if (!is_static) { +++ pop_and_check_object(obj); +++ } +++ __ add(off, obj, off); // if static, obj from cache, else obj from stack. 
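+++ // Note: for the ftos/dtos stores the value register is noreg because the
+++ // operand lives in the FPU tos register (f10 in this port); the /* ftos */
+++ // and /* dtos */ markers on access_store_at record this convention.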
+++ const Address field(off, 0); +++ __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg); +++ if (rc == may_rewrite) { +++ patch_bytecode(Bytecodes::_fast_fputfield, bc, x11, true, byte_no); +++ } +++ __ j(Done); +++ } +++ +++ __ bind(notFloat); +++#ifdef ASSERT +++ __ sub(t0, flags, (u1)dtos); +++ __ bnez(t0, notDouble); +++#endif +++ +++ // dtos +++ { +++ __ pop(dtos); +++ // field address +++ if (!is_static) { +++ pop_and_check_object(obj); +++ } +++ __ add(off, obj, off); // if static, obj from cache, else obj from stack. +++ const Address field(off, 0); +++ __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg); +++ if (rc == may_rewrite) { +++ patch_bytecode(Bytecodes::_fast_dputfield, bc, x11, true, byte_no); +++ } +++ } +++ +++#ifdef ASSERT +++ __ j(Done); +++ +++ __ bind(notDouble); +++ __ stop("Bad state"); +++#endif +++ +++ __ bind(Done); +++ +++ { +++ Label notVolatile; +++ __ test_bit(t0, x15, ConstantPoolCacheEntry::is_volatile_shift); +++ __ beqz(t0, notVolatile); +++ __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore); +++ __ bind(notVolatile); +++ } +++} +++ +++void TemplateTable::putfield(int byte_no) +++{ +++ putfield_or_static(byte_no, false); +++} +++ +++void TemplateTable::nofast_putfield(int byte_no) { +++ putfield_or_static(byte_no, false, may_not_rewrite); +++} +++ +++void TemplateTable::putstatic(int byte_no) { +++ putfield_or_static(byte_no, true); +++} +++ +++void TemplateTable::jvmti_post_fast_field_mod() +++{ +++ if (JvmtiExport::can_post_field_modification()) { +++ // Check to see if a field modification watch has been set before +++ // we take the time to call into the VM. +++ Label L2; +++ ExternalAddress target((address)JvmtiExport::get_field_modification_count_addr()); +++ __ relocate(target.rspec(), [&] { +++ int32_t offset; +++ __ la_patchable(t0, target, offset); +++ __ lwu(c_rarg3, Address(t0, offset)); +++ }); +++ __ beqz(c_rarg3, L2); +++ __ pop_ptr(x9); // copy the object pointer from tos +++ __ verify_oop(x9); +++ __ push_ptr(x9); // put the object pointer back on tos +++ // Save tos values before call_VM() clobbers them. Since we have +++ // to do it for every data type, we use the saved values as the +++ // jvalue object. 
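+++ // For example (illustrative only): _fast_iputfield saves the int value from
+++ // x10, _fast_fputfield/_fast_dputfield save from the FPU tos (hence push_f()
+++ // and push_d() take no register argument), and _fast_aputfield saves the oop
+++ // with push_ptr() so the GC can locate it during the VM call.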
+++ switch (bytecode()) { // load values into the jvalue object +++ case Bytecodes::_fast_aputfield: __ push_ptr(x10); break; +++ case Bytecodes::_fast_bputfield: // fall through +++ case Bytecodes::_fast_zputfield: // fall through +++ case Bytecodes::_fast_sputfield: // fall through +++ case Bytecodes::_fast_cputfield: // fall through +++ case Bytecodes::_fast_iputfield: __ push_i(x10); break; +++ case Bytecodes::_fast_dputfield: __ push_d(); break; +++ case Bytecodes::_fast_fputfield: __ push_f(); break; +++ case Bytecodes::_fast_lputfield: __ push_l(x10); break; +++ +++ default: +++ ShouldNotReachHere(); +++ } +++ __ mv(c_rarg3, esp); // points to jvalue on the stack +++ // access constant pool cache entry +++ __ get_cache_entry_pointer_at_bcp(c_rarg2, x10, 1); +++ __ verify_oop(x9); +++ // x9: object pointer copied above +++ // c_rarg2: cache entry pointer +++ // c_rarg3: jvalue object on the stack +++ __ call_VM(noreg, +++ CAST_FROM_FN_PTR(address, +++ InterpreterRuntime::post_field_modification), +++ x9, c_rarg2, c_rarg3); +++ +++ switch (bytecode()) { // restore tos values +++ case Bytecodes::_fast_aputfield: __ pop_ptr(x10); break; +++ case Bytecodes::_fast_bputfield: // fall through +++ case Bytecodes::_fast_zputfield: // fall through +++ case Bytecodes::_fast_sputfield: // fall through +++ case Bytecodes::_fast_cputfield: // fall through +++ case Bytecodes::_fast_iputfield: __ pop_i(x10); break; +++ case Bytecodes::_fast_dputfield: __ pop_d(); break; +++ case Bytecodes::_fast_fputfield: __ pop_f(); break; +++ case Bytecodes::_fast_lputfield: __ pop_l(x10); break; +++ default: break; +++ } +++ __ bind(L2); +++ } +++} +++ +++void TemplateTable::fast_storefield(TosState state) +++{ +++ transition(state, vtos); +++ +++ ByteSize base = ConstantPoolCache::base_offset(); +++ +++ jvmti_post_fast_field_mod(); +++ +++ // access constant pool cache +++ __ get_cache_and_index_at_bcp(x12, x11, 1); +++ +++ // Must prevent reordering of the following cp cache loads with bytecode load +++ __ membar(MacroAssembler::LoadLoad); +++ +++ // test for volatile with x13 +++ __ lwu(x13, Address(x12, in_bytes(base + +++ ConstantPoolCacheEntry::flags_offset()))); +++ +++ // replace index with field offset from cache entry +++ __ ld(x11, Address(x12, in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); +++ +++ { +++ Label notVolatile; +++ __ test_bit(t0, x13, ConstantPoolCacheEntry::is_volatile_shift); +++ __ beqz(t0, notVolatile); +++ __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore); +++ __ bind(notVolatile); +++ } +++ +++ // Get object from stack +++ pop_and_check_object(x12); +++ +++ // field address +++ __ add(x11, x12, x11); +++ const Address field(x11, 0); +++ +++ // access field +++ switch (bytecode()) { +++ case Bytecodes::_fast_aputfield: +++ do_oop_store(_masm, field, x10, IN_HEAP); +++ break; +++ case Bytecodes::_fast_lputfield: +++ __ access_store_at(T_LONG, IN_HEAP, field, x10, noreg, noreg); +++ break; +++ case Bytecodes::_fast_iputfield: +++ __ access_store_at(T_INT, IN_HEAP, field, x10, noreg, noreg); +++ break; +++ case Bytecodes::_fast_zputfield: +++ __ access_store_at(T_BOOLEAN, IN_HEAP, field, x10, noreg, noreg); +++ break; +++ case Bytecodes::_fast_bputfield: +++ __ access_store_at(T_BYTE, IN_HEAP, field, x10, noreg, noreg); +++ break; +++ case Bytecodes::_fast_sputfield: +++ __ access_store_at(T_SHORT, IN_HEAP, field, x10, noreg, noreg); +++ break; +++ case Bytecodes::_fast_cputfield: +++ __ access_store_at(T_CHAR, IN_HEAP, field, x10, noreg, noreg); +++ break; +++ 
case Bytecodes::_fast_fputfield: +++ __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg); +++ break; +++ case Bytecodes::_fast_dputfield: +++ __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ +++ { +++ Label notVolatile; +++ __ test_bit(t0, x13, ConstantPoolCacheEntry::is_volatile_shift); +++ __ beqz(t0, notVolatile); +++ __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore); +++ __ bind(notVolatile); +++ } +++} +++ +++void TemplateTable::fast_accessfield(TosState state) +++{ +++ transition(atos, state); +++ // Do the JVMTI work here to avoid disturbing the register state below +++ if (JvmtiExport::can_post_field_access()) { +++ // Check to see if a field access watch has been set before we +++ // take the time to call into the VM. +++ Label L1; +++ ExternalAddress target((address)JvmtiExport::get_field_access_count_addr()); +++ __ relocate(target.rspec(), [&] { +++ int32_t offset; +++ __ la_patchable(t0, target, offset); +++ __ lwu(x12, Address(t0, offset)); +++ }); +++ __ beqz(x12, L1); +++ // access constant pool cache entry +++ __ get_cache_entry_pointer_at_bcp(c_rarg2, t1, 1); +++ __ verify_oop(x10); +++ __ push_ptr(x10); // save object pointer before call_VM() clobbers it +++ __ mv(c_rarg1, x10); +++ // c_rarg1: object pointer copied above +++ // c_rarg2: cache entry pointer +++ __ call_VM(noreg, +++ CAST_FROM_FN_PTR(address, +++ InterpreterRuntime::post_field_access), +++ c_rarg1, c_rarg2); +++ __ pop_ptr(x10); // restore object pointer +++ __ bind(L1); +++ } +++ +++ // access constant pool cache +++ __ get_cache_and_index_at_bcp(x12, x11, 1); +++ +++ // Must prevent reordering of the following cp cache loads with bytecode load +++ __ membar(MacroAssembler::LoadLoad); +++ +++ __ ld(x11, Address(x12, in_bytes(ConstantPoolCache::base_offset() + +++ ConstantPoolCacheEntry::f2_offset()))); +++ __ lwu(x13, Address(x12, in_bytes(ConstantPoolCache::base_offset() + +++ ConstantPoolCacheEntry::flags_offset()))); +++ +++ // x10: object +++ __ verify_oop(x10); +++ __ null_check(x10); +++ __ add(x11, x10, x11); +++ const Address field(x11, 0); +++ +++ // access field +++ switch (bytecode()) { +++ case Bytecodes::_fast_agetfield: +++ do_oop_load(_masm, field, x10, IN_HEAP); +++ __ verify_oop(x10); +++ break; +++ case Bytecodes::_fast_lgetfield: +++ __ access_load_at(T_LONG, IN_HEAP, x10, field, noreg, noreg); +++ break; +++ case Bytecodes::_fast_igetfield: +++ __ access_load_at(T_INT, IN_HEAP, x10, field, noreg, noreg); +++ __ sign_extend(x10, x10, 32); +++ break; +++ case Bytecodes::_fast_bgetfield: +++ __ access_load_at(T_BYTE, IN_HEAP, x10, field, noreg, noreg); +++ break; +++ case Bytecodes::_fast_sgetfield: +++ __ access_load_at(T_SHORT, IN_HEAP, x10, field, noreg, noreg); +++ break; +++ case Bytecodes::_fast_cgetfield: +++ __ access_load_at(T_CHAR, IN_HEAP, x10, field, noreg, noreg); +++ break; +++ case Bytecodes::_fast_fgetfield: +++ __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg); +++ break; +++ case Bytecodes::_fast_dgetfield: +++ __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ { +++ Label notVolatile; +++ __ test_bit(t0, x13, ConstantPoolCacheEntry::is_volatile_shift); +++ __ beqz(t0, notVolatile); +++ __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); +++ __ bind(notVolatile); +++ } +++} +++ +++void TemplateTable::fast_xaccess(TosState 
state) +++{ +++ transition(vtos, state); +++ +++ // get receiver +++ __ ld(x10, aaddress(0)); +++ // access constant pool cache +++ __ get_cache_and_index_at_bcp(x12, x13, 2); +++ __ ld(x11, Address(x12, in_bytes(ConstantPoolCache::base_offset() + +++ ConstantPoolCacheEntry::f2_offset()))); +++ +++ // make sure exception is reported in correct bcp range (getfield is +++ // next instruction) +++ __ addi(xbcp, xbcp, 1); +++ __ null_check(x10); +++ switch (state) { +++ case itos: +++ __ add(x10, x10, x11); +++ __ access_load_at(T_INT, IN_HEAP, x10, Address(x10, 0), noreg, noreg); +++ __ sign_extend(x10, x10, 32); +++ break; +++ case atos: +++ __ add(x10, x10, x11); +++ do_oop_load(_masm, Address(x10, 0), x10, IN_HEAP); +++ __ verify_oop(x10); +++ break; +++ case ftos: +++ __ add(x10, x10, x11); +++ __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, Address(x10), noreg, noreg); +++ break; +++ default: +++ ShouldNotReachHere(); +++ } +++ +++ { +++ Label notVolatile; +++ __ lwu(x13, Address(x12, in_bytes(ConstantPoolCache::base_offset() + +++ ConstantPoolCacheEntry::flags_offset()))); +++ __ test_bit(t0, x13, ConstantPoolCacheEntry::is_volatile_shift); +++ __ beqz(t0, notVolatile); +++ __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); +++ __ bind(notVolatile); +++ } +++ +++ __ sub(xbcp, xbcp, 1); +++} +++ +++//----------------------------------------------------------------------------- +++// Calls +++ +++void TemplateTable::prepare_invoke(int byte_no, +++ Register method, // linked method (or i-klass) +++ Register index, // itable index, MethodType, etc. +++ Register recv, // if caller wants to see it +++ Register flags // if caller wants to test it +++ ) { +++ // determine flags +++ const Bytecodes::Code code = bytecode(); +++ const bool is_invokeinterface = code == Bytecodes::_invokeinterface; +++ const bool is_invokedynamic = code == Bytecodes::_invokedynamic; +++ const bool is_invokehandle = code == Bytecodes::_invokehandle; +++ const bool is_invokevirtual = code == Bytecodes::_invokevirtual; +++ const bool is_invokespecial = code == Bytecodes::_invokespecial; +++ const bool load_receiver = (recv != noreg); +++ const bool save_flags = (flags != noreg); +++ assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); +++ assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal"); +++ assert(flags == noreg || flags == x13, ""); +++ assert(recv == noreg || recv == x12, ""); +++ +++ // setup registers & access constant pool cache +++ if (recv == noreg) { +++ recv = x12; +++ } +++ if (flags == noreg) { +++ flags = x13; +++ } +++ assert_different_registers(method, index, recv, flags); +++ +++ // save 'interpreter return address' +++ __ save_bcp(); +++ +++ load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); +++ +++ // maybe push appendix to arguments (just before return address) +++ if (is_invokedynamic || is_invokehandle) { +++ Label L_no_push; +++ __ test_bit(t0, flags, ConstantPoolCacheEntry::has_appendix_shift); +++ __ beqz(t0, L_no_push); +++ // Push the appendix as a trailing parameter. +++ // This must be done before we get the receiver, +++ // since the parameter_size includes it. +++ __ push_reg(x9); +++ __ mv(x9, index); +++ __ load_resolved_reference_at_index(index, x9); +++ __ pop_reg(x9); +++ __ push_reg(index); // push appendix (MethodType, CallSite, etc.) 
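+++ // The appendix (e.g. the resolved CallSite/MethodType of an invokedynamic or
+++ // invokehandle site) is counted in parameter_size, so it must be on the
+++ // expression stack before the receiver offset is computed from
+++ // parameter_size below.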
+++ __ bind(L_no_push);
+++ }
+++
+++ // load receiver if needed (note: no return address pushed yet)
+++ if (load_receiver) {
+++ __ andi(recv, flags, ConstantPoolCacheEntry::parameter_size_mask); // parameter_size_mask = (1 << 8) - 1
+++ __ shadd(t0, recv, esp, t0, 3);
+++ __ ld(recv, Address(t0, -Interpreter::expr_offset_in_bytes(1)));
+++ __ verify_oop(recv);
+++ }
+++
+++ // compute return type
+++ __ slli(t1, flags, XLEN - (ConstantPoolCacheEntry::tos_state_shift + ConstantPoolCacheEntry::tos_state_bits));
+++ __ srli(t1, t1, XLEN - ConstantPoolCacheEntry::tos_state_bits); // extract tos state, flags bits 28..31, into t1 bits 0..3
+++
+++ // load return address
+++ {
+++ const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
+++ __ mv(t0, table_addr);
+++ __ shadd(t0, t1, t0, t1, 3);
+++ __ ld(ra, Address(t0, 0));
+++ }
+++}
+++
+++void TemplateTable::invokevirtual_helper(Register index,
+++ Register recv,
+++ Register flags)
+++{
+++ // Uses temporary registers x10, x13
+++ assert_different_registers(index, recv, x10, x13);
+++ // Test for an invoke of a final method
+++ Label notFinal;
+++ __ test_bit(t0, flags, ConstantPoolCacheEntry::is_vfinal_shift);
+++ __ beqz(t0, notFinal);
+++
+++ const Register method = index; // method must be xmethod
+++ assert(method == xmethod, "Method must be xmethod for interpreter calling convention");
+++
+++ // do the call - the index is actually the method to call
+++ // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
+++
+++ // It's final, need a null check here!
+++ __ null_check(recv);
+++
+++ // profile this call
+++ __ profile_final_call(x10);
+++ __ profile_arguments_type(x10, method, x14, true);
+++
+++ __ jump_from_interpreted(method);
+++
+++ __ bind(notFinal);
+++
+++ // get receiver klass
+++ __ null_check(recv, oopDesc::klass_offset_in_bytes());
+++ __ load_klass(x10, recv);
+++
+++ // profile this call
+++ __ profile_virtual_call(x10, xlocals, x13);
+++
+++ // get target Method & entry point
+++ __ lookup_virtual_method(x10, index, method);
+++ __ profile_arguments_type(x13, method, x14, true);
+++ __ jump_from_interpreted(method);
+++}
+++
+++void TemplateTable::invokevirtual(int byte_no)
+++{
+++ transition(vtos, vtos);
+++ assert(byte_no == f2_byte, "use this argument");
+++
+++ prepare_invoke(byte_no, xmethod, noreg, x12, x13);
+++
+++ // xmethod: index (actually a Method*)
+++ // x12: receiver
+++ // x13: flags
+++
+++ invokevirtual_helper(xmethod, x12, x13);
+++}
+++
+++void TemplateTable::invokespecial(int byte_no)
+++{
+++ transition(vtos, vtos);
+++ assert(byte_no == f1_byte, "use this argument");
+++
+++ prepare_invoke(byte_no, xmethod, noreg, // get f1 Method*
+++ x12); // get receiver also for null check
+++ __ verify_oop(x12);
+++ __ null_check(x12);
+++ // do the call
+++ __ profile_call(x10);
+++ __ profile_arguments_type(x10, xmethod, xbcp, false);
+++ __ jump_from_interpreted(xmethod);
+++}
+++
+++void TemplateTable::invokestatic(int byte_no)
+++{
+++ transition(vtos, vtos);
+++ assert(byte_no == f1_byte, "use this argument");
+++
+++ prepare_invoke(byte_no, xmethod); // get f1 Method*
+++ // do the call
+++ __ profile_call(x10);
+++ __ profile_arguments_type(x10, xmethod, x14, false);
+++ __ jump_from_interpreted(xmethod);
+++}
+++
+++void TemplateTable::fast_invokevfinal(int byte_no)
+++{
+++ __ call_Unimplemented();
+++}
+++
+++void TemplateTable::invokeinterface(int byte_no) {
+++ transition(vtos, vtos);
+++ assert(byte_no == f1_byte, "use this argument");
+++
+++ prepare_invoke(byte_no, x10,
xmethod, // get f1 Klass*, f2 Method* +++ x12, x13); // recv, flags +++ +++ // x10: interface klass (from f1) +++ // xmethod: method (from f2) +++ // x12: receiver +++ // x13: flags +++ +++ // First check for Object case, then private interface method, +++ // then regular interface method. +++ +++ // Special case of invokeinterface called for virtual method of +++ // java.lang.Object. See cpCache.cpp for details +++ Label notObjectMethod; +++ __ test_bit(t0, x13, ConstantPoolCacheEntry::is_forced_virtual_shift); +++ __ beqz(t0, notObjectMethod); +++ +++ invokevirtual_helper(xmethod, x12, x13); +++ __ bind(notObjectMethod); +++ +++ Label no_such_interface; +++ +++ // Check for private method invocation - indicated by vfinal +++ Label notVFinal; +++ __ test_bit(t0, x13, ConstantPoolCacheEntry::is_vfinal_shift); +++ __ beqz(t0, notVFinal); +++ +++ // Check receiver klass into x13 - also a null check +++ __ null_check(x12, oopDesc::klass_offset_in_bytes()); +++ __ load_klass(x13, x12); +++ +++ Label subtype; +++ __ check_klass_subtype(x13, x10, x14, subtype); +++ // If we get here the typecheck failed +++ __ j(no_such_interface); +++ __ bind(subtype); +++ +++ __ profile_final_call(x10); +++ __ profile_arguments_type(x10, xmethod, x14, true); +++ __ jump_from_interpreted(xmethod); +++ +++ __ bind(notVFinal); +++ +++ // Get receiver klass into x13 - also a null check +++ __ restore_locals(); +++ __ null_check(x12, oopDesc::klass_offset_in_bytes()); +++ __ load_klass(x13, x12); +++ +++ Label no_such_method; +++ +++ // Preserve method for the throw_AbstractMethodErrorVerbose. +++ __ mv(x28, xmethod); +++ // Receiver subtype check against REFC. +++ // Superklass in x10. Subklass in x13. Blows t1, x30 +++ __ lookup_interface_method(// inputs: rec. class, interface, itable index +++ x13, x10, noreg, +++ // outputs: scan temp. reg, scan temp. reg +++ t1, x30, +++ no_such_interface, +++ /*return_method=*/false); +++ +++ // profile this call +++ __ profile_virtual_call(x13, x30, x9); +++ +++ // Get declaring interface class from method, and itable index +++ __ load_method_holder(x10, xmethod); +++ __ lwu(xmethod, Address(xmethod, Method::itable_index_offset())); +++ __ subw(xmethod, xmethod, Method::itable_index_max); +++ __ negw(xmethod, xmethod); +++ +++ // Preserve recvKlass for throw_AbstractMethodErrorVerbose +++ __ mv(xlocals, x13); +++ __ lookup_interface_method(// inputs: rec. class, interface, itable index +++ xlocals, x10, xmethod, +++ // outputs: method, scan temp. reg +++ xmethod, x30, +++ no_such_interface); +++ +++ // xmethod: Method to call +++ // x12: receiver +++ // Check for abstract method error +++ // Note: This should be done more efficiently via a throw_abstract_method_error +++ // interpreter entry point and a conditional jump to it in case of a null +++ // method. +++ __ beqz(xmethod, no_such_method); +++ +++ __ profile_arguments_type(x13, xmethod, x30, true); +++ +++ // do the call +++ // x12: receiver +++ // xmethod: Method +++ __ jump_from_interpreted(xmethod); +++ __ should_not_reach_here(); +++ +++ // exception handling code follows ... +++ // note: must restore interpreter registers to canonical +++ // state for exception handling to work correctly! +++ +++ __ bind(no_such_method); +++ // throw exception +++ __ restore_bcp(); // bcp must be correct for exception handler (was destroyed) +++ __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) +++ // Pass arguments for generating a verbose error message. 
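+++ // x13 (the receiver klass) and x28 (the Method* preserved above) are passed
+++ // so the verbose throw helper can name the class and method in its message.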
+++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), x13, x28);
+++ // the call_VM checks for exception, so we should never return here.
+++ __ should_not_reach_here();
+++
+++ __ bind(no_such_interface);
+++ // throw exception
+++ __ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
+++ __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
+++ // Pass arguments for generating a verbose error message.
+++ __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+++ InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), x13, x10);
+++ // the call_VM checks for exception, so we should never return here.
+++ __ should_not_reach_here();
+++ return;
+++}
+++
+++void TemplateTable::invokehandle(int byte_no) {
+++ transition(vtos, vtos);
+++ assert(byte_no == f1_byte, "use this argument");
+++
+++ prepare_invoke(byte_no, xmethod, x10, x12);
+++ __ verify_method_ptr(x12);
+++ __ verify_oop(x12);
+++ __ null_check(x12);
+++
+++ // FIXME: profile the LambdaForm also
+++
+++ // x30 is safe to use here as a temp reg because it is about to
+++ // be clobbered by jump_from_interpreted().
+++ __ profile_final_call(x30);
+++ __ profile_arguments_type(x30, xmethod, x14, true);
+++
+++ __ jump_from_interpreted(xmethod);
+++}
+++
+++void TemplateTable::invokedynamic(int byte_no) {
+++ transition(vtos, vtos);
+++ assert(byte_no == f1_byte, "use this argument");
+++
+++ prepare_invoke(byte_no, xmethod, x10);
+++
+++ // x10: CallSite object (from cpool->resolved_references[])
+++ // xmethod: MH.linkToCallSite method (from f2)
+++
+++ // Note: x10_callsite is already pushed by prepare_invoke
+++
+++ // %%% should make a type profile for any invokedynamic that takes a ref argument
+++ // profile this call
+++ __ profile_call(xbcp);
+++ __ profile_arguments_type(x13, xmethod, x30, false);
+++
+++ __ verify_oop(x10);
+++
+++ __ jump_from_interpreted(xmethod);
+++}
+++
+++//-----------------------------------------------------------------------------
+++// Allocation
+++
+++void TemplateTable::_new() {
+++ transition(vtos, atos);
+++
+++ __ get_unsigned_2_byte_index_at_bcp(x13, 1);
+++ Label slow_case;
+++ Label done;
+++ Label initialize_header;
+++ Label initialize_object; // including clearing the fields
+++
+++ __ get_cpool_and_tags(x14, x10);
+++ // Make sure the class we're about to instantiate has been resolved.
+++ // This is done before loading InstanceKlass to be consistent with the order
+++ // in which the constant pool is updated (see ConstantPool::klass_at_put)
+++ const int tags_offset = Array<u1>::base_offset_in_bytes();
+++ __ add(t0, x10, x13);
+++ __ la(t0, Address(t0, tags_offset));
+++ __ membar(MacroAssembler::AnyAny);
+++ __ lbu(t0, t0);
+++ __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
+++ __ sub(t1, t0, (u1)JVM_CONSTANT_Class);
+++ __ bnez(t1, slow_case);
+++
+++ // get InstanceKlass
+++ __ load_resolved_klass_at_offset(x14, x13, x14, t0);
+++
+++ // make sure klass is initialized & doesn't have finalizer
+++ // make sure klass is fully initialized
+++ __ lbu(t0, Address(x14, InstanceKlass::init_state_offset()));
+++ __ sub(t1, t0, (u1)InstanceKlass::fully_initialized);
+++ __ bnez(t1, slow_case);
+++
+++ // get instance_size in InstanceKlass (scaled to a count of bytes)
+++ __ lwu(x13, Address(x14, Klass::layout_helper_offset()));
+++ // test to see if it has a finalizer or is malformed in some way
+++ __ test_bit(t0, x13, exact_log2(Klass::_lh_instance_slow_path_bit));
+++ __ bnez(t0, slow_case);
+++
+++ // Allocate the instance:
+++ // If TLAB is enabled:
+++ // Try to allocate in the TLAB.
+++ // If fails, go to the slow path.
+++ // Else If inline contiguous allocations are enabled:
+++ // Try to allocate in eden.
+++ // If fails due to heap end, go to slow path
+++ //
+++ // If TLAB is enabled OR inline contiguous is enabled:
+++ // Initialize the allocation.
+++ // Exit.
+++ // Go to slow path.
+++ const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();
+++
+++ if (UseTLAB) {
+++ __ tlab_allocate(x10, x13, 0, noreg, x11, slow_case);
+++
+++ if (ZeroTLAB) {
+++ // the fields have been already cleared
+++ __ j(initialize_header);
+++ } else {
+++ // initialize both the header and fields
+++ __ j(initialize_object);
+++ }
+++ } else {
+++ // Allocation in the shared Eden, if allowed.
+++ //
+++ // x13: instance size in bytes
+++ if (allow_shared_alloc) {
+++ __ eden_allocate(x10, x13, 0, x28, slow_case);
+++ }
+++ }
+++
+++ // If UseTLAB or allow_shared_alloc is true, the object was created above and
+++ // still needs to be initialized. Otherwise, skip and go to the slow path.
+++ if (UseTLAB || allow_shared_alloc) {
+++ // The object is initialized before the header. If the object size is
+++ // zero, go directly to the header initialization.
+++ __ bind(initialize_object);
+++ __ sub(x13, x13, sizeof(oopDesc));
+++ __ beqz(x13, initialize_header);
+++
+++ // Initialize object fields
+++ {
+++ __ add(x12, x10, sizeof(oopDesc));
+++ Label loop;
+++ __ bind(loop);
+++ __ sd(zr, Address(x12));
+++ __ add(x12, x12, BytesPerLong);
+++ __ sub(x13, x13, BytesPerLong);
+++ __ bnez(x13, loop);
+++ }
+++
+++ // initialize object header only.
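+++ // Header layout written below (a sketch): the mark word first (the klass's
+++ // prototype header under UseBiasedLocking, otherwise markWord::prototype()),
+++ // then a zeroed klass gap for compressed oops, and the klass pointer last so
+++ // a concurrent observer never sees a klass on an uninitialized object.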
+++ __ bind(initialize_header);
+++ if (UseBiasedLocking) {
+++ __ ld(t0, Address(x14, Klass::prototype_header_offset()));
+++ } else {
+++ __ mv(t0, (intptr_t)markWord::prototype().value());
+++ }
+++ __ sd(t0, Address(x10, oopDesc::mark_offset_in_bytes()));
+++ __ store_klass_gap(x10, zr); // zero klass gap for compressed oops
+++ __ store_klass(x10, x14); // store klass last
+++
+++ {
+++ SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
+++ // Trigger dtrace event for fastpath
+++ __ push(atos); // save the return value
+++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), x10);
+++ __ pop(atos); // restore the return value
+++ }
+++ __ j(done);
+++ }
+++
+++ // slow case
+++ __ bind(slow_case);
+++ __ get_constant_pool(c_rarg1);
+++ __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
+++ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
+++ __ verify_oop(x10);
+++
+++ // continue
+++ __ bind(done);
+++ // Must prevent reordering of stores for object initialization with stores that publish the new object.
+++ __ membar(MacroAssembler::StoreStore);
+++}
+++
+++void TemplateTable::newarray() {
+++ transition(itos, atos);
+++ __ load_unsigned_byte(c_rarg1, at_bcp(1));
+++ __ mv(c_rarg2, x10);
+++ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
+++ c_rarg1, c_rarg2);
+++ // Must prevent reordering of stores for object initialization with stores that publish the new object.
+++ __ membar(MacroAssembler::StoreStore);
+++}
+++
+++void TemplateTable::anewarray() {
+++ transition(itos, atos);
+++ __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
+++ __ get_constant_pool(c_rarg1);
+++ __ mv(c_rarg3, x10);
+++ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
+++ c_rarg1, c_rarg2, c_rarg3);
+++ // Must prevent reordering of stores for object initialization with stores that publish the new object.
+++ __ membar(MacroAssembler::StoreStore);
+++}
+++
+++void TemplateTable::arraylength() {
+++ transition(atos, itos);
+++ __ null_check(x10, arrayOopDesc::length_offset_in_bytes());
+++ __ lwu(x10, Address(x10, arrayOopDesc::length_offset_in_bytes()));
+++}
+++
+++void TemplateTable::checkcast()
+++{
+++ transition(atos, atos);
+++ Label done, is_null, ok_is_subtype, quicked, resolved;
+++ __ beqz(x10, is_null);
+++
+++ // Get cpool & tags index
+++ __ get_cpool_and_tags(x12, x13); // x12=cpool, x13=tags array
+++ __ get_unsigned_2_byte_index_at_bcp(x9, 1); // x9=index
+++ // See if bytecode has already been quicked
+++ __ add(t0, x13, Array<u1>::base_offset_in_bytes());
+++ __ add(x11, t0, x9);
+++ __ membar(MacroAssembler::AnyAny);
+++ __ lbu(x11, x11);
+++ __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
+++ __ sub(t0, x11, (u1)JVM_CONSTANT_Class);
+++ __ beqz(t0, quicked);
+++
+++ __ push(atos); // save receiver for result, and for GC
+++ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+++ // vm_result_2 has metadata result
+++ __ get_vm_result_2(x10, xthread);
+++ __ pop_reg(x13); // restore receiver
+++ __ j(resolved);
+++
+++ // Get superklass in x10 and subklass in x13
+++ __ bind(quicked);
+++ __ mv(x13, x10); // Save object in x13; x10 needed for subtype check
+++ __ load_resolved_klass_at_offset(x12, x9, x10, t0); // x10 = klass
+++
+++ __ bind(resolved);
+++ __ load_klass(x9, x13);
+++
+++ // Generate subtype check. Blows x12, x15. Object in x13.
+++ // Superklass in x10. Subklass in x9.
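+++ // gen_subtype_check branches to ok_is_subtype on success and falls through
+++ // on failure, which is why the failure path directly below pushes the object
+++ // back and jumps to the ClassCastException entry.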
+++ __ gen_subtype_check(x9, ok_is_subtype);
+++
+++ // Come here on failure
+++ __ push_reg(x13);
+++ // object is at TOS
+++ __ j(Interpreter::_throw_ClassCastException_entry);
+++
+++ // Come here on success
+++ __ bind(ok_is_subtype);
+++ __ mv(x10, x13); // Restore object from x13 into x10
+++
+++ // Collect counts on whether this test sees NULLs a lot or not.
+++ if (ProfileInterpreter) {
+++ __ j(done);
+++ __ bind(is_null);
+++ __ profile_null_seen(x12);
+++ } else {
+++ __ bind(is_null); // same as 'done'
+++ }
+++ __ bind(done);
+++}
+++
+++void TemplateTable::instanceof() {
+++ transition(atos, itos);
+++ Label done, is_null, ok_is_subtype, quicked, resolved;
+++ __ beqz(x10, is_null);
+++
+++ // Get cpool & tags index
+++ __ get_cpool_and_tags(x12, x13); // x12=cpool, x13=tags array
+++ __ get_unsigned_2_byte_index_at_bcp(x9, 1); // x9=index
+++ // See if bytecode has already been quicked
+++ __ add(t0, x13, Array<u1>::base_offset_in_bytes());
+++ __ add(x11, t0, x9);
+++ __ membar(MacroAssembler::AnyAny);
+++ __ lbu(x11, x11);
+++ __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
+++ __ sub(t0, x11, (u1)JVM_CONSTANT_Class);
+++ __ beqz(t0, quicked);
+++
+++ __ push(atos); // save receiver for result, and for GC
+++ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+++ // vm_result_2 has metadata result
+++ __ get_vm_result_2(x10, xthread);
+++ __ pop_reg(x13); // restore receiver
+++ __ verify_oop(x13);
+++ __ load_klass(x13, x13);
+++ __ j(resolved);
+++
+++ // Get superklass in x10 and subklass in x13
+++ __ bind(quicked);
+++ __ load_klass(x13, x10);
+++ __ load_resolved_klass_at_offset(x12, x9, x10, t0);
+++
+++ __ bind(resolved);
+++
+++ // Generate subtype check. Blows x12, x15
+++ // Superklass in x10. Subklass in x13.
+++ __ gen_subtype_check(x13, ok_is_subtype);
+++
+++ // Come here on failure
+++ __ mv(x10, zr);
+++ __ j(done);
+++ // Come here on success
+++ __ bind(ok_is_subtype);
+++ __ mv(x10, 1);
+++
+++ // Collect counts on whether this test sees NULLs a lot or not.
+++ if (ProfileInterpreter) {
+++ __ j(done);
+++ __ bind(is_null);
+++ __ profile_null_seen(x12);
+++ } else {
+++ __ bind(is_null); // same as 'done'
+++ }
+++ __ bind(done);
+++ // x10 = 0: obj == NULL or obj is not an instanceof the specified klass
+++ // x10 = 1: obj != NULL and obj is an instanceof the specified klass
+++}
+++
+++//-----------------------------------------------------------------------------
+++// Breakpoints
+++void TemplateTable::_breakpoint() {
+++ // Note: We get here even if we are single stepping.
+++ // jbug insists on setting breakpoints at every bytecode
+++ // even if we are in single step mode.
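+++ // Flow (summary): fetch the original, unpatched bytecode from the method,
+++ // post the JVMTI breakpoint event, then redispatch the original bytecode,
+++ // which dispatch_only_normal picks up from t0.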
+++
+++ transition(vtos, vtos);
+++
+++ // get the unpatched byte code
+++ __ get_method(c_rarg1);
+++ __ call_VM(noreg,
+++ CAST_FROM_FN_PTR(address,
+++ InterpreterRuntime::get_original_bytecode_at),
+++ c_rarg1, xbcp);
+++ __ mv(x9, x10);
+++
+++ // post the breakpoint event
+++ __ call_VM(noreg,
+++ CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
+++ xmethod, xbcp);
+++
+++ // complete the execution of original bytecode
+++ __ mv(t0, x9);
+++ __ dispatch_only_normal(vtos);
+++}
+++
+++//-----------------------------------------------------------------------------
+++// Exceptions
+++
+++void TemplateTable::athrow() {
+++ transition(atos, vtos);
+++ __ null_check(x10);
+++ __ j(Interpreter::throw_exception_entry());
+++}
+++
+++//-----------------------------------------------------------------------------
+++// Synchronization
+++//
+++// Note: monitorenter & exit are symmetric routines; which is reflected
+++// in the assembly code structure as well
+++//
+++// Stack layout:
+++//
+++// [expressions ] <--- esp = expression stack top
+++// ..
+++// [expressions ]
+++// [monitor entry] <--- monitor block top = expression stack bot
+++// ..
+++// [monitor entry]
+++// [frame data ] <--- monitor block bot
+++// ...
+++// [saved fp ] <--- fp
+++void TemplateTable::monitorenter()
+++{
+++ transition(atos, vtos);
+++
+++ // check for NULL object
+++ __ null_check(x10);
+++
+++ const Address monitor_block_top(
+++ fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
+++ const Address monitor_block_bot(
+++ fp, frame::interpreter_frame_initial_sp_offset * wordSize);
+++ const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+++
+++ Label allocated;
+++
+++ // initialize entry pointer
+++ __ mv(c_rarg1, zr); // points to free slot or NULL
+++
+++ // find a free slot in the monitor block (result in c_rarg1)
+++ {
+++ Label entry, loop, exit, notUsed;
+++ __ ld(c_rarg3, monitor_block_top); // points to current entry,
+++ // starting with top-most entry
+++ __ la(c_rarg2, monitor_block_bot); // points to word before bottom
+++
+++ __ j(entry);
+++
+++ __ bind(loop);
+++ // check if current entry is used
+++ // if not used then remember entry in c_rarg1
+++ __ ld(t0, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
+++ __ bnez(t0, notUsed);
+++ __ mv(c_rarg1, c_rarg3);
+++ __ bind(notUsed);
+++ // check if current entry is for same object
+++ // if same object then stop searching
+++ __ beq(x10, t0, exit);
+++ // otherwise advance to next entry
+++ __ add(c_rarg3, c_rarg3, entry_size);
+++ __ bind(entry);
+++ // check if bottom reached
+++ // if not at bottom then check this entry
+++ __ bne(c_rarg3, c_rarg2, loop);
+++ __ bind(exit);
+++ }
+++
+++ __ bnez(c_rarg1, allocated); // check if a slot has been found and
+++ // if found, continue with that one
+++
+++ // allocate one if there's no free slot
+++ {
+++ Label entry, loop;
+++ // 1. compute new pointers // esp: old expression stack top
+++ __ ld(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
+++ __ sub(esp, esp, entry_size); // move expression stack top
+++ __ sub(c_rarg1, c_rarg1, entry_size); // move expression stack bottom
+++ __ mv(c_rarg3, esp); // set start value for copy loop
+++ __ sd(c_rarg1, monitor_block_bot); // set new monitor block bottom
+++ __ sub(sp, sp, entry_size); // make room for the monitor
+++
+++ __ j(entry);
+++ // 2. move expression stack contents
+++ __ bind(loop);
+++ __ ld(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
+++ // word from old location
+++ __ sd(c_rarg2, Address(c_rarg3, 0)); // and store it at new location
+++ __ add(c_rarg3, c_rarg3, wordSize); // advance to next word
+++ __ bind(entry);
+++ __ bne(c_rarg3, c_rarg1, loop); // check if bottom reached. if not at bottom
+++ // then copy next word
+++ }
+++
+++ // call run-time routine
+++ // c_rarg1: points to monitor entry
+++ __ bind(allocated);
+++
+++ // Increment bcp to point to the next bytecode, so exception
+++ // handling for async. exceptions works correctly.
+++ // The object has already been popped from the stack, so the
+++ // expression stack looks correct.
+++ __ addi(xbcp, xbcp, 1);
+++
+++ // store object
+++ __ sd(x10, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
+++ __ lock_object(c_rarg1);
+++
+++ // check to make sure this monitor doesn't cause stack overflow after locking
+++ __ save_bcp(); // in case of exception
+++ __ generate_stack_overflow_check(0);
+++
+++ // The bcp has already been incremented. Just need to dispatch to
+++ // next instruction.
+++ __ dispatch_next(vtos);
+++}
+++
+++void TemplateTable::monitorexit()
+++{
+++ transition(atos, vtos);
+++
+++ // check for NULL object
+++ __ null_check(x10);
+++
+++ const Address monitor_block_top(
+++ fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
+++ const Address monitor_block_bot(
+++ fp, frame::interpreter_frame_initial_sp_offset * wordSize);
+++ const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+++
+++ Label found;
+++
+++ // find matching slot
+++ {
+++ Label entry, loop;
+++ __ ld(c_rarg1, monitor_block_top); // points to current entry,
+++ // starting with top-most entry
+++ __ la(c_rarg2, monitor_block_bot); // points to word before bottom
+++ // of monitor block
+++ __ j(entry);
+++
+++ __ bind(loop);
+++ // check if current entry is for same object
+++ __ ld(t0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
+++ // if same object then stop searching
+++ __ beq(x10, t0, found);
+++ // otherwise advance to next entry
+++ __ add(c_rarg1, c_rarg1, entry_size);
+++ __ bind(entry);
+++ // check if bottom reached
+++ // if not at bottom then check this entry
+++ __ bne(c_rarg1, c_rarg2, loop);
+++ }
+++
+++ // error handling.
Unlocking was not block-structured +++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, +++ InterpreterRuntime::throw_illegal_monitor_state_exception)); +++ __ should_not_reach_here(); +++ +++ // call run-time routine +++ __ bind(found); +++ __ push_ptr(x10); // make sure object is on stack (contract with oopMaps) +++ __ unlock_object(c_rarg1); +++ __ pop_ptr(x10); // discard object +++} +++ +++// Wide instructions +++void TemplateTable::wide() +++{ +++ __ load_unsigned_byte(x9, at_bcp(1)); +++ __ mv(t0, (address)Interpreter::_wentry_point); +++ __ shadd(t0, x9, t0, t1, 3); +++ __ ld(t0, Address(t0)); +++ __ jr(t0); +++} +++ +++// Multi arrays +++void TemplateTable::multianewarray() { +++ transition(vtos, atos); +++ __ load_unsigned_byte(x10, at_bcp(3)); // get number of dimensions +++ // last dim is on top of stack; we want address of first one: +++ // first_addr = last_addr + (ndims - 1) * wordSize +++ __ shadd(c_rarg1, x10, esp, c_rarg1, 3); +++ __ sub(c_rarg1, c_rarg1, wordSize); +++ call_VM(x10, +++ CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), +++ c_rarg1); +++ __ load_unsigned_byte(x11, at_bcp(3)); +++ __ shadd(esp, x11, esp, t0, 3); +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/templateTable_riscv.hpp ++@@ -0,0 +1,42 @@ +++/* +++ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_TEMPLATETABLE_RISCV_HPP +++#define CPU_RISCV_TEMPLATETABLE_RISCV_HPP +++ +++static void prepare_invoke(int byte_no, +++ Register method, // linked method (or i-klass) +++ Register index = noreg, // itable index, MethodType, etc. +++ Register recv = noreg, // if caller wants to see it +++ Register flags = noreg // if caller wants to test it +++ ); +++static void invokevirtual_helper(Register index, Register recv, +++ Register flags); +++ +++// Helpers +++static void index_check(Register array, Register index); +++ +++#endif // CPU_RISCV_TEMPLATETABLE_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/universalNativeInvoker_riscv.cpp ++@@ -0,0 +1,33 @@ +++/* +++ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "prims/universalNativeInvoker.hpp" +++#include "utilities/debug.hpp" +++ +++address ProgrammableInvoker::generate_adapter(jobject jabi, jobject jlayout) { +++ Unimplemented(); +++ return nullptr; +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/universalUpcallHandle_riscv.cpp ++@@ -0,0 +1,42 @@ +++/* +++ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "prims/universalUpcallHandler.hpp" +++#include "utilities/debug.hpp" +++ +++address ProgrammableUpcallHandler::generate_upcall_stub(jobject jrec, jobject jabi, jobject jlayout) { +++ Unimplemented(); +++ return nullptr; +++} +++ +++address ProgrammableUpcallHandler::generate_optimized_upcall_stub(jobject mh, Method* entry, jobject jabi, jobject jconv) { +++ ShouldNotCallThis(); +++ return nullptr; +++} +++ +++bool ProgrammableUpcallHandler::supports_optimized_upcalls() { +++ return false; +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/vmStructs_riscv.hpp ++@@ -0,0 +1,42 @@ +++/* +++ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef CPU_RISCV_VMSTRUCTS_RISCV_HPP +++#define CPU_RISCV_VMSTRUCTS_RISCV_HPP +++ +++// These are the CPU-specific fields, types and integer +++// constants required by the Serviceability Agent. This file is +++// referenced by vmStructs.cpp. +++ +++#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \ +++ volatile_nonstatic_field(JavaFrameAnchor, _last_Java_fp, intptr_t*) +++ +++#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) +++ +++#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) +++ +++#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) +++ +++#endif // CPU_RISCV_VMSTRUCTS_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/vm_version_ext_riscv.cpp ++@@ -0,0 +1,87 @@ +++/* +++ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "memory/allocation.hpp" +++#include "memory/allocation.inline.hpp" +++#include "runtime/os.inline.hpp" +++#include "vm_version_ext_riscv.hpp" +++ +++// VM_Version_Ext statics +++int VM_Version_Ext::_no_of_threads = 0; +++int VM_Version_Ext::_no_of_cores = 0; +++int VM_Version_Ext::_no_of_sockets = 0; +++bool VM_Version_Ext::_initialized = false; +++char VM_Version_Ext::_cpu_name[CPU_TYPE_DESC_BUF_SIZE] = {0}; +++char VM_Version_Ext::_cpu_desc[CPU_DETAILED_DESC_BUF_SIZE] = {0}; +++ +++void VM_Version_Ext::initialize_cpu_information(void) { +++ // do nothing if cpu info has been initialized +++ if (_initialized) { +++ return; +++ } +++ +++ _no_of_cores = os::processor_count(); +++ _no_of_threads = _no_of_cores; +++ _no_of_sockets = _no_of_cores; +++ snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE - 1, "RISCV64"); +++ snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "RISCV64 %s", _features_string); +++ _initialized = true; +++} +++ +++int VM_Version_Ext::number_of_threads(void) { +++ initialize_cpu_information(); +++ return _no_of_threads; +++} +++ +++int VM_Version_Ext::number_of_cores(void) { +++ initialize_cpu_information(); +++ return _no_of_cores; +++} +++ +++int VM_Version_Ext::number_of_sockets(void) { +++ initialize_cpu_information(); +++ return _no_of_sockets; +++} +++ +++const char* VM_Version_Ext::cpu_name(void) { +++ initialize_cpu_information(); +++ char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_TYPE_DESC_BUF_SIZE, mtTracing); +++ if (NULL == tmp) { +++ return NULL; +++ } +++ strncpy(tmp, _cpu_name, CPU_TYPE_DESC_BUF_SIZE); +++ return tmp; +++} +++ +++const char* VM_Version_Ext::cpu_description(void) { +++ initialize_cpu_information(); +++ char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_DETAILED_DESC_BUF_SIZE, mtTracing); +++ if (NULL == tmp) { +++ return NULL; +++ } +++ strncpy(tmp, _cpu_desc, CPU_DETAILED_DESC_BUF_SIZE); +++ return tmp; +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/vm_version_ext_riscv.hpp ++@@ -0,0 +1,55 @@ +++/* +++ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#ifndef CPU_RISCV_VM_VERSION_EXT_RISCV_HPP +++#define CPU_RISCV_VM_VERSION_EXT_RISCV_HPP +++ +++#include "runtime/vm_version.hpp" +++#include "utilities/macros.hpp" +++ +++class VM_Version_Ext : public VM_Version { +++ private: +++ static const size_t CPU_TYPE_DESC_BUF_SIZE = 256; +++ static const size_t CPU_DETAILED_DESC_BUF_SIZE = 4096; +++ +++ static int _no_of_threads; +++ static int _no_of_cores; +++ static int _no_of_sockets; +++ static bool _initialized; +++ static char _cpu_name[CPU_TYPE_DESC_BUF_SIZE]; +++ static char _cpu_desc[CPU_DETAILED_DESC_BUF_SIZE]; +++ +++ public: +++ static int number_of_threads(void); +++ static int number_of_cores(void); +++ static int number_of_sockets(void); +++ +++ static const char* cpu_name(void); +++ static const char* cpu_description(void); +++ static void initialize_cpu_information(void); +++ +++}; +++ +++#endif // CPU_RISCV_VM_VERSION_EXT_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/vm_version_riscv.cpp ++@@ -0,0 +1,225 @@ +++/* +++ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "runtime/java.hpp" +++#include "runtime/os.hpp" +++#include "runtime/vm_version.hpp" +++#include "utilities/formatBuffer.hpp" +++#include "utilities/macros.hpp" +++ +++#include OS_HEADER_INLINE(os) +++ +++const char* VM_Version::_uarch = ""; +++const char* VM_Version::_vm_mode = ""; +++uint32_t VM_Version::_initial_vector_length = 0; +++ +++void VM_Version::initialize() { +++ _supports_cx8 = true; +++ _supports_atomic_getset4 = true; +++ _supports_atomic_getadd4 = true; +++ _supports_atomic_getset8 = true; +++ _supports_atomic_getadd8 = true; +++ +++ get_os_cpu_info(); +++ +++ // check if satp.mode is supported, currently supports up to SV48(RV64) +++ if (get_satp_mode() > VM_SV48) { +++ vm_exit_during_initialization( +++ err_msg("Unsupported satp mode: %s. 
Only satp modes up to sv48 are supported for now.", +++ _vm_mode)); +++ } +++ +++ if (FLAG_IS_DEFAULT(UseFMA)) { +++ FLAG_SET_DEFAULT(UseFMA, true); +++ } +++ +++ if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) { +++ FLAG_SET_DEFAULT(AllocatePrefetchDistance, 0); +++ } +++ +++ if (UseAES || UseAESIntrinsics) { +++ if (UseAES && !FLAG_IS_DEFAULT(UseAES)) { +++ warning("AES instructions are not available on this CPU"); +++ FLAG_SET_DEFAULT(UseAES, false); +++ } +++ if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) { +++ warning("AES intrinsics are not available on this CPU"); +++ FLAG_SET_DEFAULT(UseAESIntrinsics, false); +++ } +++ } +++ +++ if (UseAESCTRIntrinsics) { +++ warning("AES/CTR intrinsics are not available on this CPU"); +++ FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false); +++ } +++ +++ if (UseSHA) { +++ warning("SHA instructions are not available on this CPU"); +++ FLAG_SET_DEFAULT(UseSHA, false); +++ } +++ +++ if (UseSHA1Intrinsics) { +++ warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU."); +++ FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); +++ } +++ +++ if (UseSHA256Intrinsics) { +++ warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU."); +++ FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); +++ } +++ +++ if (UseSHA512Intrinsics) { +++ warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU."); +++ FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); +++ } +++ +++ if (UseSHA3Intrinsics) { +++ warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU."); +++ FLAG_SET_DEFAULT(UseSHA3Intrinsics, false); +++ } +++ +++ if (UseCRC32Intrinsics) { +++ warning("CRC32 intrinsics are not available on this CPU."); +++ FLAG_SET_DEFAULT(UseCRC32Intrinsics, false); +++ } +++ +++ if (UseCRC32CIntrinsics) { +++ warning("CRC32C intrinsics are not available on this CPU."); +++ FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false); +++ } +++ +++ if (UseMD5Intrinsics) { +++ warning("MD5 intrinsics are not available on this CPU."); +++ FLAG_SET_DEFAULT(UseMD5Intrinsics, false); +++ } +++ +++ if (UseRVV) { +++ if (!(_features & CPU_V)) { +++ warning("RVV is not supported on this CPU"); +++ FLAG_SET_DEFAULT(UseRVV, false); +++ } else { +++ // read vector length from vector CSR vlenb +++ _initial_vector_length = get_current_vector_length(); +++ } +++ } +++ +++ if (UseRVC && !(_features & CPU_C)) { +++ warning("RVC is not supported on this CPU"); +++ FLAG_SET_DEFAULT(UseRVC, false); +++ } +++ +++ if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) { +++ FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true); +++ } +++ +++ if (UseZbb) { +++ if (FLAG_IS_DEFAULT(UsePopCountInstruction)) { +++ FLAG_SET_DEFAULT(UsePopCountInstruction, true); +++ } +++ } else { +++ FLAG_SET_DEFAULT(UsePopCountInstruction, false); +++ } +++ +++ char buf[512]; +++ buf[0] = '\0'; +++ if (_uarch != NULL && strcmp(_uarch, "") != 0) snprintf(buf, sizeof(buf), "%s,", _uarch); +++ strcat(buf, "rv64"); +++#define ADD_FEATURE_IF_SUPPORTED(id, name, bit) if (_features & CPU_##id) strcat(buf, name); +++ CPU_FEATURE_FLAGS(ADD_FEATURE_IF_SUPPORTED) +++#undef ADD_FEATURE_IF_SUPPORTED +++ +++ _features_string = os::strdup(buf); +++ +++#ifdef COMPILER2 +++ c2_initialize(); +++#endif // COMPILER2 +++} +++ +++#ifdef COMPILER2 +++void VM_Version::c2_initialize() { +++ if (UseCMoveUnconditionally) { +++ FLAG_SET_DEFAULT(UseCMoveUnconditionally, false); +++ } +++ +++ if (ConditionalMoveLimit > 0) { +++ 
FLAG_SET_DEFAULT(ConditionalMoveLimit, 0); +++ } +++ +++ if (!UseRVV) { +++ FLAG_SET_DEFAULT(SpecialEncodeISOArray, false); +++ } +++ +++ if (!UseRVV && MaxVectorSize) { +++ FLAG_SET_DEFAULT(MaxVectorSize, 0); +++ } +++ +++ if (!UseRVV) { +++ FLAG_SET_DEFAULT(UseRVVForBigIntegerShiftIntrinsics, false); +++ } +++ +++ if (UseRVV) { +++ if (FLAG_IS_DEFAULT(MaxVectorSize)) { +++ MaxVectorSize = _initial_vector_length; +++ } else if (MaxVectorSize < 16) { +++ warning("RVV does not support vector length less than 16 bytes. Disabling RVV."); +++ UseRVV = false; +++ } else if (is_power_of_2(MaxVectorSize)) { +++ if (MaxVectorSize > _initial_vector_length) { +++ warning("Current system only supports max RVV vector length %d. Set MaxVectorSize to %d", +++ _initial_vector_length, _initial_vector_length); +++ } +++ MaxVectorSize = _initial_vector_length; +++ } else { +++ vm_exit_during_initialization(err_msg("Unsupported MaxVectorSize: %d", (int)MaxVectorSize)); +++ } +++ } +++ +++ // disable prefetch +++ if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) { +++ FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0); +++ } +++ +++ if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) { +++ FLAG_SET_DEFAULT(UseMulAddIntrinsic, true); +++ } +++ +++ if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) { +++ FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true); +++ } +++ +++ if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) { +++ FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, true); +++ } +++ +++ if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) { +++ FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, true); +++ } +++ +++ if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) { +++ FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true); +++ } +++} +++#endif // COMPILER2 ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/vm_version_riscv.hpp ++@@ -0,0 +1,82 @@ +++/* +++ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#ifndef CPU_RISCV_VM_VERSION_RISCV_HPP +++#define CPU_RISCV_VM_VERSION_RISCV_HPP +++ +++#include "runtime/abstract_vm_version.hpp" +++#include "runtime/arguments.hpp" +++#include "runtime/globals_extension.hpp" +++#include "utilities/sizes.hpp" +++ +++class VM_Version : public Abstract_VM_Version { +++#ifdef COMPILER2 +++private: +++ static void c2_initialize(); +++#endif // COMPILER2 +++ +++// VM modes (satp.mode) privileged ISA 1.10 +++enum VM_MODE { +++ VM_MBARE = 0, +++ VM_SV39 = 8, +++ VM_SV48 = 9, +++ VM_SV57 = 10, +++ VM_SV64 = 11 +++}; +++ +++protected: +++ static const char* _uarch; +++ static const char* _vm_mode; +++ static uint32_t _initial_vector_length; +++ static void get_os_cpu_info(); +++ static uint32_t get_current_vector_length(); +++ static VM_MODE get_satp_mode(); +++ +++public: +++ // Initialization +++ static void initialize(); +++ +++ constexpr static bool supports_stack_watermark_barrier() { return true; } +++ +++ enum Feature_Flag { +++#define CPU_FEATURE_FLAGS(decl) \ +++ decl(I, "i", 8) \ +++ decl(M, "m", 12) \ +++ decl(A, "a", 0) \ +++ decl(F, "f", 5) \ +++ decl(D, "d", 3) \ +++ decl(C, "c", 2) \ +++ decl(V, "v", 21) +++ +++#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = (1 << bit), +++ CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_FLAG) +++#undef DECLARE_CPU_FEATURE_FLAG +++ }; +++ +++ static void initialize_cpu_information(void); +++}; +++ +++#endif // CPU_RISCV_VM_VERSION_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/vmreg_riscv.cpp ++@@ -0,0 +1,64 @@ +++/* +++ * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/assembler.hpp" +++#include "code/vmreg.hpp" +++ +++void VMRegImpl::set_regName() { +++ int i = 0; +++ Register reg = ::as_Register(0); +++ for ( ; i < ConcreteRegisterImpl::max_gpr ; ) { +++ for (int j = 0 ; j < RegisterImpl::max_slots_per_register ; j++) { +++ regName[i++] = reg->name(); +++ } +++ reg = reg->successor(); +++ } +++ +++ FloatRegister freg = ::as_FloatRegister(0); +++ for ( ; i < ConcreteRegisterImpl::max_fpr ; ) { +++ for (int j = 0 ; j < FloatRegisterImpl::max_slots_per_register ; j++) { +++ regName[i++] = freg->name(); +++ } +++ freg = freg->successor(); +++ } +++ +++ VectorRegister vreg = ::as_VectorRegister(0); +++ for ( ; i < ConcreteRegisterImpl::max_vpr ; ) { +++ for (int j = 0 ; j < VectorRegisterImpl::max_slots_per_register ; j++) { +++ regName[i++] = vreg->name(); +++ } +++ vreg = vreg->successor(); +++ } +++ +++ for ( ; i < ConcreteRegisterImpl::number_of_registers ; i++) { +++ regName[i] = "NON-GPR-FPR-VPR"; +++ } +++} +++ +++VMReg VMRegImpl::vmStorageToVMReg(int type, int index) { +++ Unimplemented(); +++ return VMRegImpl::Bad(); +++} ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/vmreg_riscv.hpp ++@@ -0,0 +1,68 @@ +++/* +++ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#ifndef CPU_RISCV_VMREG_RISCV_HPP +++#define CPU_RISCV_VMREG_RISCV_HPP +++ +++inline bool is_Register() { +++ return (unsigned int) value() < (unsigned int) ConcreteRegisterImpl::max_gpr; +++} +++ +++inline bool is_FloatRegister() { +++ return value() >= ConcreteRegisterImpl::max_gpr && value() < ConcreteRegisterImpl::max_fpr; +++} +++ +++inline bool is_VectorRegister() { +++ return value() >= ConcreteRegisterImpl::max_fpr && value() < ConcreteRegisterImpl::max_vpr; +++} +++ +++inline Register as_Register() { +++ assert(is_Register(), "must be"); +++ return ::as_Register(value() / RegisterImpl::max_slots_per_register); +++} +++ +++inline FloatRegister as_FloatRegister() { +++ assert(is_FloatRegister() && is_even(value()), "must be"); +++ return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) / +++ FloatRegisterImpl::max_slots_per_register); +++} +++ +++inline VectorRegister as_VectorRegister() { +++ assert(is_VectorRegister() && ((value() & (VectorRegisterImpl::max_slots_per_register - 1)) == 0), "must be"); +++ return ::as_VectorRegister((value() - ConcreteRegisterImpl::max_fpr) / +++ VectorRegisterImpl::max_slots_per_register); +++} +++ +++inline bool is_concrete() { +++ assert(is_reg(), "must be"); +++ if (is_VectorRegister()) { +++ int base = value() - ConcreteRegisterImpl::max_fpr; +++ return (base % VectorRegisterImpl::max_slots_per_register) == 0; +++ } else { +++ return is_even(value()); +++ } +++} +++ +++#endif // CPU_RISCV_VMREG_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/vmreg_riscv.inline.hpp ++@@ -0,0 +1,46 @@ +++/* +++ * Copyright (c) 2006, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++#ifndef CPU_RISCV_VM_VMREG_RISCV_INLINE_HPP +++#define CPU_RISCV_VM_VMREG_RISCV_INLINE_HPP +++ +++inline VMReg RegisterImpl::as_VMReg() { +++ if (this == noreg) { +++ return VMRegImpl::Bad(); +++ } +++ return VMRegImpl::as_VMReg(encoding() * RegisterImpl::max_slots_per_register); +++} +++ +++inline VMReg FloatRegisterImpl::as_VMReg() { +++ return VMRegImpl::as_VMReg((encoding() * FloatRegisterImpl::max_slots_per_register) + +++ ConcreteRegisterImpl::max_gpr); +++} +++ +++inline VMReg VectorRegisterImpl::as_VMReg() { +++ return VMRegImpl::as_VMReg((encoding() * VectorRegisterImpl::max_slots_per_register) + +++ ConcreteRegisterImpl::max_fpr); +++} +++ +++#endif // CPU_RISCV_VM_VMREG_RISCV_INLINE_HPP ++--- /dev/null +++++ b/src/hotspot/cpu/riscv/vtableStubs_riscv.cpp ++@@ -0,0 +1,260 @@ +++/* +++ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, Red Hat Inc. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "asm/macroAssembler.inline.hpp" +++#include "assembler_riscv.inline.hpp" +++#include "code/vtableStubs.hpp" +++#include "interp_masm_riscv.hpp" +++#include "memory/resourceArea.hpp" +++#include "oops/compiledICHolder.hpp" +++#include "oops/instanceKlass.hpp" +++#include "oops/klassVtable.hpp" +++#include "runtime/sharedRuntime.hpp" +++#include "vmreg_riscv.inline.hpp" +++#ifdef COMPILER2 +++#include "opto/runtime.hpp" +++#endif +++ +++// machine-dependent part of VtableStubs: create VtableStub of correct size and +++// initialize its code +++ +++#define __ masm-> +++ +++#ifndef PRODUCT +++extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index); +++#endif +++ +++VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { +++ // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. +++ const int stub_code_length = code_size_limit(true); +++ VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index); +++ // Can be NULL if there is no free space in the code cache. +++ if (s == NULL) { +++ return NULL; +++ } +++ +++ // Count unused bytes in instruction sequences of variable size. +++ // We add them to the computed buffer size in order to avoid +++ // overflow in subsequently generated stubs. 
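+++  // For example: if the DebugVtables sequence below is estimated at 256
+++  // bytes but only, say, 200 bytes are emitted, the surplus of 56 bytes
+++  // is banked in slop_bytes, so a later stub whose variable-length
+++  // sequences run longer than average still fits within code_size_limit().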
+++ address start_pc = NULL; +++ int slop_bytes = 0; +++ int slop_delta = 0; +++ +++ ResourceMark rm; +++ CodeBuffer cb(s->entry_point(), stub_code_length); +++ MacroAssembler* masm = new MacroAssembler(&cb); +++ assert_cond(masm != NULL); +++ +++#if (!defined(PRODUCT) && defined(COMPILER2)) +++ if (CountCompiledCalls) { +++ __ la(t2, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); +++ __ increment(Address(t2)); +++ } +++#endif +++ +++ // get receiver (need to skip return address on top of stack) +++ assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0"); +++ +++ // get receiver klass +++ address npe_addr = __ pc(); +++ __ load_klass(t2, j_rarg0); +++ +++#ifndef PRODUCT +++ if (DebugVtables) { +++ Label L; +++ start_pc = __ pc(); +++ +++ // check offset vs vtable length +++ __ lwu(t0, Address(t2, Klass::vtable_length_offset())); +++ __ mv(t1, vtable_index * vtableEntry::size()); +++ __ bgt(t0, t1, L); +++ __ enter(); +++ __ mv(x12, vtable_index); +++ +++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, x12); +++ const ptrdiff_t estimate = 256; +++ const ptrdiff_t codesize = __ pc() - start_pc; +++ slop_delta = estimate - codesize; // call_VM varies in length, depending on data +++ slop_bytes += slop_delta; +++ assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize); +++ +++ __ leave(); +++ __ bind(L); +++ } +++#endif // PRODUCT +++ +++ start_pc = __ pc(); +++ __ lookup_virtual_method(t2, vtable_index, xmethod); +++ // lookup_virtual_method generates +++ // 4 instructions (maximum value encountered in normal case):li(lui + addiw) + add + ld +++ // 1 instruction (best case):ld * 1 +++ slop_delta = 16 - (int)(__ pc() - start_pc); +++ slop_bytes += slop_delta; +++ assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta); +++ +++#ifndef PRODUCT +++ if (DebugVtables) { +++ Label L; +++ __ beqz(xmethod, L); +++ __ ld(t0, Address(xmethod, Method::from_compiled_offset())); +++ __ bnez(t0, L); +++ __ stop("Vtable entry is NULL"); +++ __ bind(L); +++ } +++#endif // PRODUCT +++ +++ // x10: receiver klass +++ // xmethod: Method* +++ // x12: receiver +++ address ame_addr = __ pc(); +++ __ ld(t0, Address(xmethod, Method::from_compiled_offset())); +++ __ jr(t0); +++ +++ masm->flush(); +++ bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0); +++ +++ return s; +++} +++ +++VtableStub* VtableStubs::create_itable_stub(int itable_index) { +++ // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. +++ const int stub_code_length = code_size_limit(false); +++ VtableStub* s = new(stub_code_length) VtableStub(false, itable_index); +++ // Can be NULL if there is no free space in the code cache. +++ if (s == NULL) { +++ return NULL; +++ } +++ // Count unused bytes in instruction sequences of variable size. +++ // We add them to the computed buffer size in order to avoid +++ // overflow in subsequently generated stubs. 
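+++  // Unlike the vtable stub, this stub emits two lookup_interface_method
+++  // passes below (a receiver subtype check against the resolved klass REFC,
+++  // then the method fetch from the declaring klass DECC), so the size
+++  // estimate is checked against typecheckSize + lookupSize combined.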
+++ address start_pc = NULL; +++ int slop_bytes = 0; +++ int slop_delta = 0; +++ +++ ResourceMark rm; +++ CodeBuffer cb(s->entry_point(), stub_code_length); +++ MacroAssembler* masm = new MacroAssembler(&cb); +++ assert_cond(masm != NULL); +++ +++#if (!defined(PRODUCT) && defined(COMPILER2)) +++ if (CountCompiledCalls) { +++ __ la(x18, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); +++ __ increment(Address(x18)); +++ } +++#endif +++ +++ // get receiver (need to skip return address on top of stack) +++ assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0"); +++ +++ // Entry arguments: +++ // t1: CompiledICHolder +++ // j_rarg0: Receiver +++ +++ // This stub is called from compiled code which has no callee-saved registers, +++ // so all registers except arguments are free at this point. +++ const Register recv_klass_reg = x18; +++ const Register holder_klass_reg = x19; // declaring interface klass (DECC) +++ const Register resolved_klass_reg = xmethod; // resolved interface klass (REFC) +++ const Register temp_reg = x28; +++ const Register temp_reg2 = x29; +++ const Register icholder_reg = t1; +++ +++ Label L_no_such_interface; +++ +++ __ ld(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset())); +++ __ ld(holder_klass_reg, Address(icholder_reg, CompiledICHolder::holder_metadata_offset())); +++ +++ start_pc = __ pc(); +++ +++ // get receiver klass (also an implicit null-check) +++ address npe_addr = __ pc(); +++ __ load_klass(recv_klass_reg, j_rarg0); +++ +++ // Receiver subtype check against REFC. +++ __ lookup_interface_method(// inputs: rec. class, interface +++ recv_klass_reg, resolved_klass_reg, noreg, +++ // outputs: scan temp. reg1, scan temp. reg2 +++ temp_reg2, temp_reg, +++ L_no_such_interface, +++ /*return_method=*/false); +++ +++ const ptrdiff_t typecheckSize = __ pc() - start_pc; +++ start_pc = __ pc(); +++ +++ // Get selected method from declaring class and itable index +++ __ lookup_interface_method(// inputs: rec. class, interface, itable index +++ recv_klass_reg, holder_klass_reg, itable_index, +++ // outputs: method, scan temp. reg +++ xmethod, temp_reg, +++ L_no_such_interface); +++ +++ const ptrdiff_t lookupSize = __ pc() - start_pc; +++ +++ // Reduce "estimate" such that "padding" does not drop below 8. +++ const ptrdiff_t estimate = 256; +++ const ptrdiff_t codesize = typecheckSize + lookupSize; +++ slop_delta = (int)(estimate - codesize); +++ slop_bytes += slop_delta; +++ assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize); +++ +++#ifdef ASSERT +++ if (DebugVtables) { +++ Label L2; +++ __ beqz(xmethod, L2); +++ __ ld(t0, Address(xmethod, Method::from_compiled_offset())); +++ __ bnez(t0, L2); +++ __ stop("compiler entrypoint is null"); +++ __ bind(L2); +++ } +++#endif // ASSERT +++ +++ // xmethod: Method* +++ // j_rarg0: receiver +++ address ame_addr = __ pc(); +++ __ ld(t0, Address(xmethod, Method::from_compiled_offset())); +++ __ jr(t0); +++ +++ __ bind(L_no_such_interface); +++ // Handle IncompatibleClassChangeError in itable stubs. +++ // More detailed error message. +++ // We force resolving of the call site by jumping to the "handle +++ // wrong method" stub, and so let the interpreter runtime do all the +++ // dirty work. 
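+++  // A far_jump is used here because the shared wrong-method stub may sit
+++  // beyond the ±1 MiB reach of a direct jal from this code-cache location.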
+++ assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order"); +++ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); +++ +++ masm->flush(); +++ bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0); +++ +++ return s; +++} +++ +++int VtableStub::pd_code_alignment() { +++ // RISCV cache line size is not an architected constant. We just align on word size. +++ const unsigned int icache_line_size = wordSize; +++ return icache_line_size; +++} ++--- a/src/hotspot/os/linux/os_linux.cpp +++++ b/src/hotspot/os/linux/os_linux.cpp ++@@ -2559,6 +2559,8 @@ void os::get_summary_cpu_info(char* cpui ++ strncpy(cpuinfo, "IA64", length); ++ #elif defined(PPC) ++ strncpy(cpuinfo, "PPC64", length); +++#elif defined(RISCV) +++ strncpy(cpuinfo, "RISCV64", length); ++ #elif defined(S390) ++ strncpy(cpuinfo, "S390", length); ++ #elif defined(SPARC) ++--- /dev/null +++++ b/src/hotspot/os_cpu/linux_riscv/assembler_linux_riscv.cpp ++@@ -0,0 +1,26 @@ +++/* +++ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++// nothing required here ++--- /dev/null +++++ b/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp ++@@ -0,0 +1,151 @@ +++/* +++ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef OS_CPU_LINUX_RISCV_ATOMIC_LINUX_RISCV_HPP
+++#define OS_CPU_LINUX_RISCV_ATOMIC_LINUX_RISCV_HPP
+++
+++#include "runtime/vm_version.hpp"
+++
+++// Implementation of class atomic
+++
+++// Note that memory_order_conservative requires a full barrier after atomic stores.
+++// See https://patchwork.kernel.org/patch/3575821/
+++
+++template<size_t byte_size>
+++struct Atomic::PlatformAdd {
+++  template<typename D, typename I>
+++  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+++    if (order != memory_order_relaxed) {
+++      FULL_MEM_BARRIER;
+++    }
+++
+++    D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELAXED);
+++
+++    if (order != memory_order_relaxed) {
+++      FULL_MEM_BARRIER;
+++    }
+++    return res;
+++  }
+++
+++  template<typename D, typename I>
+++  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+++    return add_and_fetch(dest, add_value, order) - add_value;
+++  }
+++};
+++
+++template<size_t byte_size>
+++template<typename T>
+++inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
+++                                                     T exchange_value,
+++                                                     atomic_memory_order order) const {
+++  STATIC_ASSERT(byte_size == sizeof(T));
+++  if (order != memory_order_relaxed) {
+++    FULL_MEM_BARRIER;
+++  }
+++
+++  T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
+++
+++  if (order != memory_order_relaxed) {
+++    FULL_MEM_BARRIER;
+++  }
+++  return res;
+++}
+++
+++// __attribute__((unused)) on dest is to get rid of spurious GCC warnings.
+++template<size_t byte_size>
+++template<typename T>
+++inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest __attribute__((unused)),
+++                                                        T compare_value,
+++                                                        T exchange_value,
+++                                                        atomic_memory_order order) const {
+++  STATIC_ASSERT(byte_size == sizeof(T));
+++  T value = compare_value;
+++  if (order != memory_order_relaxed) {
+++    FULL_MEM_BARRIER;
+++  }
+++
+++  __atomic_compare_exchange(dest, &value, &exchange_value, /* weak */ false,
+++                            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+++
+++  if (order != memory_order_relaxed) {
+++    FULL_MEM_BARRIER;
+++  }
+++  return value;
+++}
+++
+++template<>
+++template<typename T>
+++inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest __attribute__((unused)),
+++                                                T compare_value,
+++                                                T exchange_value,
+++                                                atomic_memory_order order) const {
+++  STATIC_ASSERT(4 == sizeof(T));
+++
+++  T old_value;
+++  long rc;
+++
+++  if (order != memory_order_relaxed) {
+++    FULL_MEM_BARRIER;
+++  }
+++
+++  __asm__ __volatile__ (
+++    "1: sext.w   %1, %3       \n\t" // sign-extend compare_value
+++    "   lr.w     %0, %2       \n\t"
+++    "   bne      %0, %1, 2f   \n\t"
+++    "   sc.w     %1, %4, %2   \n\t"
+++    "   bnez     %1, 1b       \n\t"
+++    "2:                       \n\t"
+++    : /*%0*/"=&r" (old_value), /*%1*/"=&r" (rc), /*%2*/"+A" (*dest)
+++    : /*%3*/"r" (compare_value), /*%4*/"r" (exchange_value)
+++    : "memory" );
+++
+++  if (order != memory_order_relaxed) {
+++    FULL_MEM_BARRIER;
+++  }
+++  return old_value;
+++}
+++
+++template<size_t byte_size>
+++struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+++{
+++  template <typename T>
+++  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
+++};
+++
+++template<size_t byte_size>
+++struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
+++{
+++  template <typename T>
+++  void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
+++};
+++
+++template<size_t byte_size>
+++struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
+++{
+++  template <typename T>
+++  void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
+++};
+++
+++#endif // OS_CPU_LINUX_RISCV_ATOMIC_LINUX_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/os_cpu/linux_riscv/bytes_linux_riscv.hpp
++@@ -0,0 +1,45 @@
+++/*
+++ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef OS_CPU_LINUX_RISCV_BYTES_LINUX_RISCV_HPP
+++#define OS_CPU_LINUX_RISCV_BYTES_LINUX_RISCV_HPP
+++
+++#include <byteswap.h>
+++
+++// Efficient swapping of data bytes from Java byte
+++// ordering to native byte ordering and vice versa.
+++inline u2 Bytes::swap_u2(u2 x) {
+++  return bswap_16(x);
+++}
+++
+++inline u4 Bytes::swap_u4(u4 x) {
+++  return bswap_32(x);
+++}
+++
+++inline u8 Bytes::swap_u8(u8 x) {
+++  return bswap_64(x);
+++}
+++
+++#endif // OS_CPU_LINUX_RISCV_BYTES_LINUX_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/os_cpu/linux_riscv/copy_linux_riscv.hpp
++@@ -0,0 +1,31 @@
+++/*
+++ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef OS_CPU_LINUX_RISCV_VM_COPY_LINUX_RISCV_HPP
+++#define OS_CPU_LINUX_RISCV_VM_COPY_LINUX_RISCV_HPP
+++
+++// Empty for build system
+++
+++#endif // OS_CPU_LINUX_RISCV_VM_COPY_LINUX_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/os_cpu/linux_riscv/gc/z/zSyscall_linux_riscv.hpp
++@@ -0,0 +1,42 @@
+++/*
+++ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef OS_CPU_LINUX_RISCV_GC_Z_ZSYSCALL_LINUX_RISCV_HPP
+++#define OS_CPU_LINUX_RISCV_GC_Z_ZSYSCALL_LINUX_RISCV_HPP
+++
+++#include <sys/syscall.h>
+++
+++//
+++// Support for building on older Linux systems
+++//
+++
+++#ifndef SYS_memfd_create
+++#define SYS_memfd_create 279
+++#endif
+++#ifndef SYS_fallocate
+++#define SYS_fallocate 47
+++#endif
+++
+++#endif // OS_CPU_LINUX_RISCV_GC_Z_ZSYSCALL_LINUX_RISCV_HPP
++--- /dev/null
+++++ b/src/hotspot/os_cpu/linux_riscv/globals_linux_riscv.hpp
++@@ -0,0 +1,43 @@
+++/*
+++ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#ifndef OS_CPU_LINUX_RISCV_VM_GLOBALS_LINUX_RISCV_HPP
+++#define OS_CPU_LINUX_RISCV_VM_GLOBALS_LINUX_RISCV_HPP
+++
+++// Sets the default values for platform dependent flags used by the runtime system.
+++// (see globals.hpp) +++ +++define_pd_global(bool, DontYieldALot, false); +++define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default +++define_pd_global(intx, VMThreadStackSize, 2048); +++ +++define_pd_global(intx, CompilerThreadStackSize, 2048); +++ +++define_pd_global(uintx, JVMInvokeMethodSlack, 8192); +++ +++// Used on 64 bit platforms for UseCompressedOops base address +++define_pd_global(uintx, HeapBaseMinAddress, 2 * G); +++ +++#endif // OS_CPU_LINUX_RISCV_VM_GLOBALS_LINUX_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/os_cpu/linux_riscv/orderAccess_linux_riscv.hpp ++@@ -0,0 +1,59 @@ +++/* +++ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef OS_CPU_LINUX_RISCV_ORDERACCESS_LINUX_RISCV_HPP +++#define OS_CPU_LINUX_RISCV_ORDERACCESS_LINUX_RISCV_HPP +++ +++// Included in orderAccess.hpp header file. +++ +++#include "runtime/vm_version.hpp" +++ +++// Implementation of class OrderAccess. +++ +++inline void OrderAccess::loadload() { acquire(); } +++inline void OrderAccess::storestore() { release(); } +++inline void OrderAccess::loadstore() { acquire(); } +++inline void OrderAccess::storeload() { fence(); } +++ +++#define FULL_MEM_BARRIER __sync_synchronize() +++#define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE); +++#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE); +++ +++inline void OrderAccess::acquire() { +++ READ_MEM_BARRIER; +++} +++ +++inline void OrderAccess::release() { +++ WRITE_MEM_BARRIER; +++} +++ +++inline void OrderAccess::fence() { +++ FULL_MEM_BARRIER; +++} +++ +++inline void OrderAccess::cross_modify_fence_impl() { +++} +++ +++#endif // OS_CPU_LINUX_RISCV_ORDERACCESS_LINUX_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp ++@@ -0,0 +1,475 @@ +++/* +++ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. 
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++// no precompiled headers
+++#include "asm/macroAssembler.hpp"
+++#include "classfile/vmSymbols.hpp"
+++#include "code/codeCache.hpp"
+++#include "code/icBuffer.hpp"
+++#include "code/nativeInst.hpp"
+++#include "code/vtableStubs.hpp"
+++#include "interpreter/interpreter.hpp"
+++#include "jvm.h"
+++#include "memory/allocation.inline.hpp"
+++#include "os_share_linux.hpp"
+++#include "prims/jniFastGetField.hpp"
+++#include "prims/jvm_misc.hpp"
+++#include "runtime/arguments.hpp"
+++#include "runtime/frame.inline.hpp"
+++#include "runtime/interfaceSupport.inline.hpp"
+++#include "runtime/java.hpp"
+++#include "runtime/javaCalls.hpp"
+++#include "runtime/mutexLocker.hpp"
+++#include "runtime/osThread.hpp"
+++#include "runtime/safepointMechanism.hpp"
+++#include "runtime/sharedRuntime.hpp"
+++#include "runtime/stubRoutines.hpp"
+++#include "runtime/thread.inline.hpp"
+++#include "runtime/timer.hpp"
+++#include "signals_posix.hpp"
+++#include "utilities/debug.hpp"
+++#include "utilities/events.hpp"
+++#include "utilities/vmError.hpp"
+++
+++// put OS-includes here
+++# include <dlfcn.h>
+++# include <fpu_control.h>
+++# include <errno.h>
+++# include <pthread.h>
+++# include <signal.h>
+++# include <stdio.h>
+++# include <stdlib.h>
+++# include <sys/resource.h>
+++# include <sys/socket.h>
+++# include <sys/stat.h>
+++# include <sys/time.h>
+++# include <sys/types.h>
+++# include <sys/utsname.h>
+++# include <sys/wait.h>
+++# include <poll.h>
+++# include <pwd.h>
+++# include <ucontext.h>
+++# include <unistd.h>
+++
+++#define REG_LR 1
+++#define REG_FP 8
+++
+++NOINLINE address os::current_stack_pointer() {
+++  return (address)__builtin_frame_address(0);
+++}
+++
+++char* os::non_memory_address_word() {
+++  // Must never look like an address returned by reserve_memory,
+++  // even in its subfields (as defined by the CPU immediate fields,
+++  // if the CPU splits constants across multiple instructions).
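+++  // 0xffffffffffff has bit 47 set but bits 63..48 clear, so it is not a
+++  // canonical (sign-extended) virtual address under the Sv39/Sv48 modes
+++  // supported by this port and can never be handed out by a mapping.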
+++ +++ return (char*) 0xffffffffffff; +++} +++ +++address os::Posix::ucontext_get_pc(const ucontext_t * uc) { +++ return (address)uc->uc_mcontext.__gregs[REG_PC]; +++} +++ +++void os::Posix::ucontext_set_pc(ucontext_t * uc, address pc) { +++ uc->uc_mcontext.__gregs[REG_PC] = (intptr_t)pc; +++} +++ +++intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) { +++ return (intptr_t*)uc->uc_mcontext.__gregs[REG_SP]; +++} +++ +++intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) { +++ return (intptr_t*)uc->uc_mcontext.__gregs[REG_FP]; +++} +++ +++address os::fetch_frame_from_context(const void* ucVoid, +++ intptr_t** ret_sp, intptr_t** ret_fp) { +++ address epc; +++ const ucontext_t* uc = (const ucontext_t*)ucVoid; +++ +++ if (uc != NULL) { +++ epc = os::Posix::ucontext_get_pc(uc); +++ if (ret_sp != NULL) { +++ *ret_sp = os::Linux::ucontext_get_sp(uc); +++ } +++ if (ret_fp != NULL) { +++ *ret_fp = os::Linux::ucontext_get_fp(uc); +++ } +++ } else { +++ epc = NULL; +++ if (ret_sp != NULL) { +++ *ret_sp = (intptr_t *)NULL; +++ } +++ if (ret_fp != NULL) { +++ *ret_fp = (intptr_t *)NULL; +++ } +++ } +++ +++ return epc; +++} +++ +++frame os::fetch_compiled_frame_from_context(const void* ucVoid) { +++ const ucontext_t* uc = (const ucontext_t*)ucVoid; +++ // In compiled code, the stack banging is performed before RA +++ // has been saved in the frame. RA is live, and SP and FP +++ // belong to the caller. +++ intptr_t* frame_fp = os::Linux::ucontext_get_fp(uc); +++ intptr_t* frame_sp = os::Linux::ucontext_get_sp(uc); +++ address frame_pc = (address)(uc->uc_mcontext.__gregs[REG_LR] +++ - NativeInstruction::instruction_size); +++ return frame(frame_sp, frame_fp, frame_pc); +++} +++ +++frame os::fetch_frame_from_context(const void* ucVoid) { +++ intptr_t* frame_sp = NULL; +++ intptr_t* frame_fp = NULL; +++ address epc = fetch_frame_from_context(ucVoid, &frame_sp, &frame_fp); +++ if (!is_readable_pointer(epc)) { +++ // Try to recover from calling into bad memory +++ // Assume new frame has not been set up, the same as +++ // compiled frame stack bang +++ return fetch_compiled_frame_from_context(ucVoid); +++ } +++ return frame(frame_sp, frame_fp, epc); +++} +++ +++// By default, gcc always saves frame pointer rfp on this stack. This +++// may get turned off by -fomit-frame-pointer. +++frame os::get_sender_for_C_frame(frame* fr) { +++ return frame(fr->sender_sp(), fr->link(), fr->sender_pc()); +++} +++ +++NOINLINE frame os::current_frame() { +++ intptr_t **sender_sp = (intptr_t **)__builtin_frame_address(0); +++ if (sender_sp != NULL) { +++ frame myframe((intptr_t*)os::current_stack_pointer(), +++ sender_sp[frame::link_offset], +++ CAST_FROM_FN_PTR(address, os::current_frame)); +++ if (os::is_first_C_frame(&myframe)) { +++ // stack is not walkable +++ return frame(); +++ } else { +++ return os::get_sender_for_C_frame(&myframe); +++ } +++ } else { +++ ShouldNotReachHere(); +++ return frame(); +++ } +++} +++ +++// Utility functions +++bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, +++ ucontext_t* uc, JavaThread* thread) { +++ +++ // decide if this trap can be handled by a stub +++ address stub = NULL; +++ +++ address pc = NULL; +++ +++ //%note os_trap_1 +++ if (info != NULL && uc != NULL && thread != NULL) { +++ pc = (address) os::Posix::ucontext_get_pc(uc); +++ +++ address addr = (address) info->si_addr; +++ +++ // Make sure the high order byte is sign extended, as it may be masked away by the hardware. 
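+++    // The test below checks bit 55 and, when it is set, fills bits 63..56
+++    // with ones, restoring the sign-extended form of the fault address
+++    // that the rest of the VM expects.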
+++ if ((uintptr_t(addr) & (uintptr_t(1) << 55)) != 0) { +++ addr = address(uintptr_t(addr) | (uintptr_t(0xFF) << 56)); +++ } +++ +++ // Handle ALL stack overflow variations here +++ if (sig == SIGSEGV) { +++ // check if fault address is within thread stack +++ if (thread->is_in_full_stack(addr)) { +++ if (os::Posix::handle_stack_overflow(thread, addr, pc, uc, &stub)) { +++ return true; // continue +++ } +++ } +++ } +++ +++ if (thread->thread_state() == _thread_in_Java) { +++ // Java thread running in Java code => find exception handler if any +++ // a fault inside compiled code, the interpreter, or a stub +++ +++ // Handle signal from NativeJump::patch_verified_entry(). +++ if ((sig == SIGILL || sig == SIGTRAP) +++ && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { +++ if (TraceTraps) { +++ tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL"); +++ } +++ stub = SharedRuntime::get_handle_wrong_method_stub(); +++ } else if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) { +++ stub = SharedRuntime::get_poll_stub(pc); +++ } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) { +++ // BugId 4454115: A read from a MappedByteBuffer can fault +++ // here if the underlying file has been truncated. +++ // Do not crash the VM in such a case. +++ CodeBlob* cb = CodeCache::find_blob_unsafe(pc); +++ CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL; +++ bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc)); +++ if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) { +++ address next_pc = pc + NativeCall::instruction_size; +++ if (is_unsafe_arraycopy) { +++ next_pc = UnsafeCopyMemory::page_error_continue_pc(pc); +++ } +++ stub = SharedRuntime::handle_unsafe_access(thread, next_pc); +++ } +++ } else if (sig == SIGILL && nativeInstruction_at(pc)->is_stop()) { +++ // Pull a pointer to the error message out of the instruction +++ // stream. +++ const uint64_t *detail_msg_ptr +++ = (uint64_t*)(pc + NativeInstruction::instruction_size); +++ const char *detail_msg = (const char *)*detail_msg_ptr; +++ const char *msg = "stop"; +++ if (TraceTraps) { +++ tty->print_cr("trap: %s: (SIGILL)", msg); +++ } +++ +++ // End life with a fatal error, message and detail message and the context. +++ // Note: no need to do any post-processing here (e.g. 
signal chaining) +++ va_list va_dummy; +++ VMError::report_and_die(thread, uc, NULL, 0, msg, detail_msg, va_dummy); +++ va_end(va_dummy); +++ +++ ShouldNotReachHere(); +++ } else if (sig == SIGFPE && +++ (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) { +++ stub = +++ SharedRuntime:: +++ continuation_for_implicit_exception(thread, +++ pc, +++ SharedRuntime:: +++ IMPLICIT_DIVIDE_BY_ZERO); +++ } else if (sig == SIGSEGV && +++ MacroAssembler::uses_implicit_null_check((void*)addr)) { +++ // Determination of interpreter/vtable stub/compiled code null exception +++ stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); +++ } +++ } else if ((thread->thread_state() == _thread_in_vm || +++ thread->thread_state() == _thread_in_native) && +++ sig == SIGBUS && /* info->si_code == BUS_OBJERR && */ +++ thread->doing_unsafe_access()) { +++ address next_pc = pc + NativeCall::instruction_size; +++ if (UnsafeCopyMemory::contains_pc(pc)) { +++ next_pc = UnsafeCopyMemory::page_error_continue_pc(pc); +++ } +++ stub = SharedRuntime::handle_unsafe_access(thread, next_pc); +++ } +++ +++ // jni_fast_GetField can trap at certain pc's if a GC kicks in +++ // and the heap gets shrunk before the field access. +++ if ((sig == SIGSEGV) || (sig == SIGBUS)) { +++ address addr_slow = JNI_FastGetField::find_slowcase_pc(pc); +++ if (addr_slow != (address)-1) { +++ stub = addr_slow; +++ } +++ } +++ } +++ +++ if (stub != NULL) { +++ // save all thread context in case we need to restore it +++ if (thread != NULL) { +++ thread->set_saved_exception_pc(pc); +++ } +++ +++ os::Posix::ucontext_set_pc(uc, stub); +++ return true; +++ } +++ +++ return false; // Mute compiler +++} +++ +++void os::Linux::init_thread_fpu_state(void) { +++} +++ +++int os::Linux::get_fpu_control_word(void) { +++ return 0; +++} +++ +++void os::Linux::set_fpu_control_word(int fpu_control) { +++} +++ +++//////////////////////////////////////////////////////////////////////////////// +++// thread stack +++ +++// Minimum usable stack sizes required to get to user code. Space for +++// HotSpot guard pages is added later. +++size_t os::Posix::_compiler_thread_min_stack_allowed = 72 * K; +++size_t os::Posix::_java_thread_min_stack_allowed = 72 * K; +++size_t os::Posix::_vm_internal_thread_min_stack_allowed = 72 * K; +++ +++// return default stack size for thr_type +++size_t os::Posix::default_stack_size(os::ThreadType thr_type) { +++ // default stack size (compiler thread needs larger stack) +++ size_t s = (thr_type == os::compiler_thread ? 
4 * M : 1 * M); +++ return s; +++} +++ +++///////////////////////////////////////////////////////////////////////////// +++// helper functions for fatal error handler +++ +++static const char* reg_abi_names[] = { +++ "pc", +++ "x1(ra)", "x2(sp)", "x3(gp)", "x4(tp)", +++ "x5(t0)", "x6(t1)", "x7(t2)", +++ "x8(s0)", "x9(s1)", +++ "x10(a0)", "x11(a1)", "x12(a2)", "x13(a3)", "x14(a4)", "x15(a5)", "x16(a6)", "x17(a7)", +++ "x18(s2)", "x19(s3)", "x20(s4)", "x21(s5)", "x22(s6)", "x23(s7)", "x24(s8)", "x25(s9)", "x26(s10)", "x27(s11)", +++ "x28(t3)", "x29(t4)","x30(t5)", "x31(t6)" +++}; +++ +++void os::print_context(outputStream *st, const void *context) { +++ if (context == NULL) { +++ return; +++ } +++ +++ const ucontext_t *uc = (const ucontext_t*)context; +++ st->print_cr("Registers:"); +++ for (int r = 0; r < 32; r++) { +++ st->print("%-*.*s=", 8, 8, reg_abi_names[r]); +++ print_location(st, uc->uc_mcontext.__gregs[r]); +++ } +++ st->cr(); +++ +++ intptr_t *frame_sp = (intptr_t *)os::Linux::ucontext_get_sp(uc); +++ st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(frame_sp)); +++ print_hex_dump(st, (address)frame_sp, (address)(frame_sp + 64), sizeof(intptr_t)); +++ st->cr(); +++ +++ // Note: it may be unsafe to inspect memory near pc. For example, pc may +++ // point to garbage if entry point in an nmethod is corrupted. Leave +++ // this at the end, and hope for the best. +++ address pc = os::fetch_frame_from_context(uc).pc(); +++ print_instructions(st, pc, UseRVC ? sizeof(char) : (int)NativeInstruction::instruction_size); +++ st->cr(); +++} +++ +++void os::print_register_info(outputStream *st, const void *context) { +++ if (context == NULL) { +++ return; +++ } +++ +++ const ucontext_t *uc = (const ucontext_t*)context; +++ +++ st->print_cr("Register to memory mapping:"); +++ st->cr(); +++ +++ // this is horrendously verbose but the layout of the registers in the +++ // context does not match how we defined our abstract Register set, so +++ // we can't just iterate through the gregs area +++ +++ // this is only for the "general purpose" registers +++ +++ for (int r = 0; r < 32; r++) +++ st->print_cr("%-*.*s=" INTPTR_FORMAT, 8, 8, reg_abi_names[r], (uintptr_t)uc->uc_mcontext.__gregs[r]); +++ st->cr(); +++} +++ +++void os::setup_fpu() { +++} +++ +++#ifndef PRODUCT +++void os::verify_stack_alignment() { +++ assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment"); +++} +++#endif +++ +++int os::extra_bang_size_in_bytes() { +++ return 0; +++} +++ +++extern "C" { +++ int SpinPause() { +++ return 0; +++ } +++ +++ void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { +++ if (from > to) { +++ const jshort *end = from + count; +++ while (from < end) { +++ *(to++) = *(from++); +++ } +++ } else if (from < to) { +++ const jshort *end = from; +++ from += count - 1; +++ to += count - 1; +++ while (from >= end) { +++ *(to--) = *(from--); +++ } +++ } +++ } +++ void _Copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { +++ if (from > to) { +++ const jint *end = from + count; +++ while (from < end) { +++ *(to++) = *(from++); +++ } +++ } else if (from < to) { +++ const jint *end = from; +++ from += count - 1; +++ to += count - 1; +++ while (from >= end) { +++ *(to--) = *(from--); +++ } +++ } +++ } +++ void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { +++ if (from > to) { +++ const jlong *end = from + count; +++ while (from < end) { +++ os::atomic_copy64(from++, to++); +++ } +++ } 
else if (from < to) { +++ const jlong *end = from; +++ from += count - 1; +++ to += count - 1; +++ while (from >= end) { +++ os::atomic_copy64(from--, to--); +++ } +++ } +++ } +++ +++ void _Copy_arrayof_conjoint_bytes(const HeapWord* from, +++ HeapWord* to, +++ size_t count) { +++ memmove(to, from, count); +++ } +++ void _Copy_arrayof_conjoint_jshorts(const HeapWord* from, +++ HeapWord* to, +++ size_t count) { +++ memmove(to, from, count * 2); +++ } +++ void _Copy_arrayof_conjoint_jints(const HeapWord* from, +++ HeapWord* to, +++ size_t count) { +++ memmove(to, from, count * 4); +++ } +++ void _Copy_arrayof_conjoint_jlongs(const HeapWord* from, +++ HeapWord* to, +++ size_t count) { +++ memmove(to, from, count * 8); +++ } +++}; ++--- /dev/null +++++ b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.hpp ++@@ -0,0 +1,40 @@ +++/* +++ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef OS_CPU_LINUX_RISCV_VM_OS_LINUX_RISCV_HPP +++#define OS_CPU_LINUX_RISCV_VM_OS_LINUX_RISCV_HPP +++ +++ static void setup_fpu(); +++ +++ // Used to register dynamic code cache area with the OS +++ // Note: Currently only used in 64 bit Windows implementations +++ static bool register_code_area(char *low, char *high) { return true; } +++ +++ // Atomically copy 64 bits of data +++ static void atomic_copy64(const volatile void *src, volatile void *dst) { +++ *(jlong *) dst = *(const jlong *) src; +++ } +++ +++#endif // OS_CPU_LINUX_RISCV_VM_OS_LINUX_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/os_cpu/linux_riscv/prefetch_linux_riscv.inline.hpp ++@@ -0,0 +1,38 @@ +++/* +++ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). 
+++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef OS_CPU_LINUX_RISCV_VM_PREFETCH_LINUX_RISCV_INLINE_HPP +++#define OS_CPU_LINUX_RISCV_VM_PREFETCH_LINUX_RISCV_INLINE_HPP +++ +++#include "runtime/prefetch.hpp" +++ +++ +++inline void Prefetch::read (void *loc, intx interval) { +++} +++ +++inline void Prefetch::write(void *loc, intx interval) { +++} +++ +++#endif // OS_CPU_LINUX_RISCV_VM_PREFETCH_LINUX_RISCV_INLINE_HPP ++--- /dev/null +++++ b/src/hotspot/os_cpu/linux_riscv/thread_linux_riscv.cpp ++@@ -0,0 +1,92 @@ +++/* +++ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#include "precompiled.hpp" +++#include "runtime/frame.inline.hpp" +++#include "runtime/thread.inline.hpp" +++ +++frame JavaThread::pd_last_frame() { +++ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended"); +++ return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); +++} +++ +++// For Forte Analyzer AsyncGetCallTrace profiling support - thread is +++// currently interrupted by SIGPROF +++bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, +++ void* ucontext, bool isInJava) { +++ +++ assert(Thread::current() == this, "caller must be current thread"); +++ return pd_get_top_frame(fr_addr, ucontext, isInJava); +++} +++ +++bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) { +++ return pd_get_top_frame(fr_addr, ucontext, isInJava); +++} +++ +++bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) { +++ // If we have a last_Java_frame, then we should use it even if +++ // isInJava == true. It should be more reliable than ucontext info. +++ if (has_last_Java_frame() && frame_anchor()->walkable()) { +++ *fr_addr = pd_last_frame(); +++ return true; +++ } +++ +++ // At this point, we don't have a last_Java_frame, so +++ // we try to glean some information out of the ucontext +++ // if we were running Java code when SIGPROF came in. 
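+++  // Any frame recovered from the ucontext is vetted with safe_for_sender()
+++  // before use; under COMPILER2 a second attempt is made with a NULL fp,
+++  // since C2-compiled frames need not keep a usable frame pointer.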
+++ if (isInJava) { +++ ucontext_t* uc = (ucontext_t*) ucontext; +++ +++ intptr_t* ret_fp = NULL; +++ intptr_t* ret_sp = NULL; +++ address addr = os::fetch_frame_from_context(uc, &ret_sp, &ret_fp); +++ if (addr == NULL || ret_sp == NULL ) { +++ // ucontext wasn't useful +++ return false; +++ } +++ +++ frame ret_frame(ret_sp, ret_fp, addr); +++ if (!ret_frame.safe_for_sender(this)) { +++#ifdef COMPILER2 +++ frame ret_frame2(ret_sp, NULL, addr); +++ if (!ret_frame2.safe_for_sender(this)) { +++ // nothing else to try if the frame isn't good +++ return false; +++ } +++ ret_frame = ret_frame2; +++#else +++ // nothing else to try if the frame isn't good +++ return false; +++#endif /* COMPILER2 */ +++ } +++ *fr_addr = ret_frame; +++ return true; +++ } +++ +++ // nothing else to try +++ return false; +++} +++ +++void JavaThread::cache_global_variables() { } ++--- /dev/null +++++ b/src/hotspot/os_cpu/linux_riscv/thread_linux_riscv.hpp ++@@ -0,0 +1,48 @@ +++/* +++ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef OS_CPU_LINUX_RISCV_THREAD_LINUX_RISCV_HPP +++#define OS_CPU_LINUX_RISCV_THREAD_LINUX_RISCV_HPP +++ +++ private: +++ void pd_initialize() { +++ _anchor.clear(); +++ } +++ +++ frame pd_last_frame(); +++ +++ public: +++ static ByteSize last_Java_fp_offset() { +++ return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset(); +++ } +++ +++ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, +++ bool isInJava); +++ +++ bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava); +++private: +++ bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava); +++ +++#endif // OS_CPU_LINUX_RISCV_THREAD_LINUX_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/os_cpu/linux_riscv/vmStructs_linux_riscv.hpp ++@@ -0,0 +1,55 @@ +++/* +++ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. 
+++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++#ifndef OS_CPU_LINUX_RISCV_VM_VMSTRUCTS_LINUX_RISCV_HPP +++#define OS_CPU_LINUX_RISCV_VM_VMSTRUCTS_LINUX_RISCV_HPP +++ +++// These are the OS and CPU-specific fields, types and integer +++// constants required by the Serviceability Agent. This file is +++// referenced by vmStructs.cpp. +++ +++#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \ +++ \ +++ /******************************/ \ +++ /* Threads (NOTE: incomplete) */ \ +++ /******************************/ \ +++ nonstatic_field(OSThread, _thread_id, OSThread::thread_id_t) \ +++ nonstatic_field(OSThread, _pthread_id, pthread_t) +++ +++ +++#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \ +++ \ +++ /**********************/ \ +++ /* Posix Thread IDs */ \ +++ /**********************/ \ +++ \ +++ declare_integer_type(OSThread::thread_id_t) \ +++ declare_unsigned_integer_type(pthread_t) +++ +++#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) +++ +++#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) +++ +++#endif // OS_CPU_LINUX_RISCV_VM_VMSTRUCTS_LINUX_RISCV_HPP ++--- /dev/null +++++ b/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp ++@@ -0,0 +1,137 @@ +++/* +++ * Copyright (c) 2006, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++#include "precompiled.hpp"
+++#include "asm/register.hpp"
+++#include "runtime/os.hpp"
+++#include "runtime/os.inline.hpp"
+++#include "runtime/vm_version.hpp"
+++
+++#include <asm/hwcap.h>
+++#include <sys/auxv.h>
+++
+++#ifndef HWCAP_ISA_I
+++#define HWCAP_ISA_I (1 << ('I' - 'A'))
+++#endif
+++
+++#ifndef HWCAP_ISA_M
+++#define HWCAP_ISA_M (1 << ('M' - 'A'))
+++#endif
+++
+++#ifndef HWCAP_ISA_A
+++#define HWCAP_ISA_A (1 << ('A' - 'A'))
+++#endif
+++
+++#ifndef HWCAP_ISA_F
+++#define HWCAP_ISA_F (1 << ('F' - 'A'))
+++#endif
+++
+++#ifndef HWCAP_ISA_D
+++#define HWCAP_ISA_D (1 << ('D' - 'A'))
+++#endif
+++
+++#ifndef HWCAP_ISA_C
+++#define HWCAP_ISA_C (1 << ('C' - 'A'))
+++#endif
+++
+++#ifndef HWCAP_ISA_V
+++#define HWCAP_ISA_V (1 << ('V' - 'A'))
+++#endif
+++
+++#define read_csr(csr) \
+++({ \
+++ register unsigned long __v; \
+++ __asm__ __volatile__ ("csrr %0, %1" \
+++ : "=r" (__v) \
+++ : "i" (csr) \
+++ : "memory"); \
+++ __v; \
+++})
+++
+++uint32_t VM_Version::get_current_vector_length() {
+++ assert(_features & CPU_V, "should not call this");
+++ return (uint32_t)read_csr(CSR_VLENB);
+++}
+++
+++VM_Version::VM_MODE VM_Version::get_satp_mode() {
+++ if (!strcmp(_vm_mode, "sv39")) {
+++ return VM_SV39;
+++ } else if (!strcmp(_vm_mode, "sv48")) {
+++ return VM_SV48;
+++ } else if (!strcmp(_vm_mode, "sv57")) {
+++ return VM_SV57;
+++ } else if (!strcmp(_vm_mode, "sv64")) {
+++ return VM_SV64;
+++ } else {
+++ return VM_MBARE;
+++ }
+++}
+++
+++void VM_Version::get_os_cpu_info() {
+++
+++ uint64_t auxv = getauxval(AT_HWCAP);
+++
+++ static_assert(CPU_I == HWCAP_ISA_I, "Flag CPU_I must follow Linux HWCAP");
+++ static_assert(CPU_M == HWCAP_ISA_M, "Flag CPU_M must follow Linux HWCAP");
+++ static_assert(CPU_A == HWCAP_ISA_A, "Flag CPU_A must follow Linux HWCAP");
+++ static_assert(CPU_F == HWCAP_ISA_F, "Flag CPU_F must follow Linux HWCAP");
+++ static_assert(CPU_D == HWCAP_ISA_D, "Flag CPU_D must follow Linux HWCAP");
+++ static_assert(CPU_C == HWCAP_ISA_C, "Flag CPU_C must follow Linux HWCAP");
+++ static_assert(CPU_V == HWCAP_ISA_V, "Flag CPU_V must follow Linux HWCAP");
+++
+++ // RISC-V has four bit-manipulation ISA-extensions: Zba/Zbb/Zbc/Zbs.
+++ // Availability for those extensions could not be queried from HWCAP.
+++ // TODO: Add proper detection for those extensions.
+++ _features = auxv & (
+++ HWCAP_ISA_I |
+++ HWCAP_ISA_M |
+++ HWCAP_ISA_A |
+++ HWCAP_ISA_F |
+++ HWCAP_ISA_D |
+++ HWCAP_ISA_C |
+++ HWCAP_ISA_V);
+++
+++ if (FILE *f = fopen("/proc/cpuinfo", "r")) {
+++ char buf[512], *p;
+++ while (fgets(buf, sizeof (buf), f) != NULL) {
+++ if ((p = strchr(buf, ':')) != NULL) {
+++ if (strncmp(buf, "mmu", sizeof "mmu" - 1) == 0) {
+++ if (_vm_mode[0] != '\0') {
+++ continue;
+++ }
+++ char* vm_mode = os::strdup(p + 2);
+++ vm_mode[strcspn(vm_mode, "\n")] = '\0';
+++ _vm_mode = vm_mode;
+++ } else if (strncmp(buf, "uarch", sizeof "uarch" - 1) == 0) {
+++ char* uarch = os::strdup(p + 2);
+++ uarch[strcspn(uarch, "\n")] = '\0';
+++ _uarch = uarch;
+++ break;
+++ }
+++ }
+++ }
+++ fclose(f);
+++ }
+++}
++--- a/src/hotspot/share/c1/c1_LIR.cpp
+++++ b/src/hotspot/share/c1/c1_LIR.cpp
++@@ -1,5 +1,5 @@
++ /*
++- * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -188,6 +188,9 @@ void LIR_Op2::verify() const { ++ #ifdef ASSERT ++ switch (code()) { ++ case lir_cmove: +++#ifdef RISCV +++ assert(false, "lir_cmove is LIR_Op4 on RISCV"); +++#endif ++ case lir_xchg: ++ break; ++ ++@@ -238,8 +241,12 @@ void LIR_Op2::verify() const { ++ ++ ++ LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, BlockBegin* block) +++#ifdef RISCV +++ : LIR_Op2(lir_branch, cond, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) +++#else ++ : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) ++ , _cond(cond) +++#endif ++ , _label(block->label()) ++ , _block(block) ++ , _ublock(NULL) ++@@ -247,8 +254,12 @@ LIR_OpBranch::LIR_OpBranch(LIR_Condition ++ } ++ ++ LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, CodeStub* stub) : +++#ifdef RISCV +++ LIR_Op2(lir_branch, cond, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) +++#else ++ LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) ++ , _cond(cond) +++#endif ++ , _label(stub->entry()) ++ , _block(NULL) ++ , _ublock(NULL) ++@@ -256,8 +267,12 @@ LIR_OpBranch::LIR_OpBranch(LIR_Condition ++ } ++ ++ LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, BlockBegin* block, BlockBegin* ublock) +++#ifdef RISCV +++ : LIR_Op2(lir_cond_float_branch, cond, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) +++#else ++ : LIR_Op(lir_cond_float_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) ++ , _cond(cond) +++#endif ++ , _label(block->label()) ++ , _block(block) ++ , _ublock(ublock) ++@@ -279,13 +294,13 @@ void LIR_OpBranch::change_ublock(BlockBe ++ } ++ ++ void LIR_OpBranch::negate_cond() { ++- switch (_cond) { ++- case lir_cond_equal: _cond = lir_cond_notEqual; break; ++- case lir_cond_notEqual: _cond = lir_cond_equal; break; ++- case lir_cond_less: _cond = lir_cond_greaterEqual; break; ++- case lir_cond_lessEqual: _cond = lir_cond_greater; break; ++- case lir_cond_greaterEqual: _cond = lir_cond_less; break; ++- case lir_cond_greater: _cond = lir_cond_lessEqual; break; +++ switch (cond()) { +++ case lir_cond_equal: set_cond(lir_cond_notEqual); break; +++ case lir_cond_notEqual: set_cond(lir_cond_equal); break; +++ case lir_cond_less: set_cond(lir_cond_greaterEqual); break; +++ case lir_cond_lessEqual: set_cond(lir_cond_greater); break; +++ case lir_cond_greaterEqual: set_cond(lir_cond_less); break; +++ case lir_cond_greater: set_cond(lir_cond_lessEqual); break; ++ default: ShouldNotReachHere(); ++ } ++ } ++@@ -513,6 +528,15 @@ void LIR_OpVisitState::visit(LIR_Op* op) ++ assert(op->as_OpBranch() != NULL, "must be"); ++ LIR_OpBranch* opBranch = (LIR_OpBranch*)op; ++ +++#ifdef RISCV +++ assert(opBranch->_tmp1->is_illegal() && opBranch->_tmp2->is_illegal() && +++ opBranch->_tmp3->is_illegal() && opBranch->_tmp4->is_illegal() && +++ opBranch->_tmp5->is_illegal(), "not used"); +++ +++ if (opBranch->_opr1->is_valid()) do_input(opBranch->_opr1); +++ if (opBranch->_opr2->is_valid()) do_input(opBranch->_opr2); +++#endif +++ ++ if (opBranch->_info != NULL) do_info(opBranch->_info); ++ assert(opBranch->_result->is_illegal(), "not used"); ++ if (opBranch->_stub != NULL) opBranch->stub()->visit(this); ++@@ -601,6 +625,21 @@ void LIR_OpVisitState::visit(LIR_Op* op) ++ // to the result operand, otherwise the backend fails ++ case lir_cmove: ++ { +++#ifdef RISCV +++ assert(op->as_Op4() != NULL, "must be"); +++ LIR_Op4* op4 = 
(LIR_Op4*)op; +++ +++ assert(op4->_info == NULL && op4->_tmp1->is_illegal() && op4->_tmp2->is_illegal() && +++ op4->_tmp3->is_illegal() && op4->_tmp4->is_illegal() && op4->_tmp5->is_illegal(), "not used"); +++ assert(op4->_opr1->is_valid() && op4->_opr2->is_valid() && op4->_result->is_valid(), "used"); +++ +++ do_input(op4->_opr1); +++ do_input(op4->_opr2); +++ if (op4->_opr3->is_valid()) do_input(op4->_opr3); +++ if (op4->_opr4->is_valid()) do_input(op4->_opr4); +++ do_temp(op4->_opr2); +++ do_output(op4->_result); +++#else ++ assert(op->as_Op2() != NULL, "must be"); ++ LIR_Op2* op2 = (LIR_Op2*)op; ++ ++@@ -612,6 +651,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) ++ do_input(op2->_opr2); ++ do_temp(op2->_opr2); ++ do_output(op2->_result); +++#endif ++ ++ break; ++ } ++@@ -1042,6 +1082,12 @@ void LIR_Op3::emit_code(LIR_Assembler* m ++ masm->emit_op3(this); ++ } ++ +++#ifdef RISCV +++void LIR_Op4::emit_code(LIR_Assembler* masm) { +++ masm->emit_op4(this); +++} +++#endif +++ ++ void LIR_OpLock::emit_code(LIR_Assembler* masm) { ++ masm->emit_lock(this); ++ if (stub()) { ++@@ -1078,6 +1124,10 @@ LIR_List::LIR_List(Compilation* compilat ++ , _file(NULL) ++ , _line(0) ++ #endif +++#ifdef RISCV +++ , _cmp_opr1(LIR_OprFact::illegalOpr) +++ , _cmp_opr2(LIR_OprFact::illegalOpr) +++#endif ++ { } ++ ++ ++@@ -1095,6 +1145,38 @@ void LIR_List::set_file_and_line(const c ++ } ++ #endif ++ +++#ifdef RISCV +++void LIR_List::set_cmp_oprs(LIR_Op* op) { +++ switch (op->code()) { +++ case lir_cmp: +++ _cmp_opr1 = op->as_Op2()->in_opr1(); +++ _cmp_opr2 = op->as_Op2()->in_opr2(); +++ break; +++ case lir_branch: // fall through +++ case lir_cond_float_branch: +++ assert(op->as_OpBranch()->cond() == lir_cond_always || +++ (_cmp_opr1 != LIR_OprFact::illegalOpr && _cmp_opr2 != LIR_OprFact::illegalOpr), +++ "conditional branches must have legal operands"); +++ if (op->as_OpBranch()->cond() != lir_cond_always) { +++ op->as_Op2()->set_in_opr1(_cmp_opr1); +++ op->as_Op2()->set_in_opr2(_cmp_opr2); +++ } +++ break; +++ case lir_cmove: +++ op->as_Op4()->set_in_opr3(_cmp_opr1); +++ op->as_Op4()->set_in_opr4(_cmp_opr2); +++ break; +++#if INCLUDE_ZGC +++ case lir_zloadbarrier_test: +++ _cmp_opr1 = FrameMap::as_opr(t1); +++ _cmp_opr2 = LIR_OprFact::intConst(0); +++ break; +++#endif +++ default: +++ break; +++ } +++} +++#endif ++ ++ void LIR_List::append(LIR_InsertionBuffer* buffer) { ++ assert(this == buffer->lir_list(), "wrong lir list"); ++@@ -1825,6 +1907,10 @@ void LIR_Op1::print_patch_code(outputStr ++ // LIR_OpBranch ++ void LIR_OpBranch::print_instr(outputStream* out) const { ++ print_condition(out, cond()); out->print(" "); +++#ifdef RISCV +++ in_opr1()->print(out); out->print(" "); +++ in_opr2()->print(out); out->print(" "); +++#endif ++ if (block() != NULL) { ++ out->print("[B%d] ", block()->block_id()); ++ } else if (stub() != NULL) { ++@@ -1911,7 +1997,11 @@ void LIR_OpRoundFP::print_instr(outputSt ++ ++ // LIR_Op2 ++ void LIR_Op2::print_instr(outputStream* out) const { +++#ifdef RISCV +++ if (code() == lir_cmp || code() == lir_branch || code() == lir_cond_float_branch) { +++#else ++ if (code() == lir_cmove || code() == lir_cmp) { +++#endif ++ print_condition(out, condition()); out->print(" "); ++ } ++ in_opr1()->print(out); out->print(" "); ++@@ -1962,6 +2052,17 @@ void LIR_Op3::print_instr(outputStream* ++ result_opr()->print(out); ++ } ++ +++#ifdef RISCV +++// LIR_Op4 +++void LIR_Op4::print_instr(outputStream* out) const { +++ print_condition(out, condition()); out->print(" "); +++ in_opr1()->print(out); 
out->print(" "); +++ in_opr2()->print(out); out->print(" "); +++ in_opr3()->print(out); out->print(" "); +++ in_opr4()->print(out); out->print(" "); +++ result_opr()->print(out); +++} +++#endif ++ ++ void LIR_OpLock::print_instr(outputStream* out) const { ++ hdr_opr()->print(out); out->print(" "); ++--- a/src/hotspot/share/c1/c1_LIR.hpp +++++ b/src/hotspot/share/c1/c1_LIR.hpp ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -869,6 +869,9 @@ class LIR_Op2; ++ class LIR_OpDelay; ++ class LIR_Op3; ++ class LIR_OpAllocArray; +++#ifdef RISCV +++class LIR_Op4; +++#endif ++ class LIR_OpCall; ++ class LIR_OpJavaCall; ++ class LIR_OpRTCall; ++@@ -913,8 +916,10 @@ enum LIR_Code { ++ , lir_null_check ++ , lir_return ++ , lir_leal +++#ifndef RISCV ++ , lir_branch ++ , lir_cond_float_branch +++#endif ++ , lir_move ++ , lir_convert ++ , lir_alloc_object ++@@ -924,11 +929,17 @@ enum LIR_Code { ++ , lir_unwind ++ , end_op1 ++ , begin_op2 +++#ifdef RISCV +++ , lir_branch +++ , lir_cond_float_branch +++#endif ++ , lir_cmp ++ , lir_cmp_l2i ++ , lir_ucmp_fd2i ++ , lir_cmp_fd2i +++#ifndef RISCV ++ , lir_cmove +++#endif ++ , lir_add ++ , lir_sub ++ , lir_mul ++@@ -956,6 +967,11 @@ enum LIR_Code { ++ , lir_fmad ++ , lir_fmaf ++ , end_op3 +++#ifdef RISCV +++ , begin_op4 +++ , lir_cmove +++ , end_op4 +++#endif ++ , begin_opJavaCall ++ , lir_static_call ++ , lir_optvirtual_call ++@@ -992,6 +1008,11 @@ enum LIR_Code { ++ , begin_opAssert ++ , lir_assert ++ , end_opAssert +++#if defined(RISCV) && defined(INCLUDE_ZGC) +++ , begin_opZLoadBarrierTest +++ , lir_zloadbarrier_test +++ , end_opZLoadBarrierTest +++#endif ++ }; ++ ++ ++@@ -1128,6 +1149,9 @@ class LIR_Op: public CompilationResource ++ virtual LIR_Op1* as_Op1() { return NULL; } ++ virtual LIR_Op2* as_Op2() { return NULL; } ++ virtual LIR_Op3* as_Op3() { return NULL; } +++#ifdef RISCV +++ virtual LIR_Op4* as_Op4() { return NULL; } +++#endif ++ virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; } ++ virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; } ++ virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; } ++@@ -1399,47 +1423,6 @@ class LIR_OpRTCall: public LIR_OpCall { ++ virtual void verify() const; ++ }; ++ ++- ++-class LIR_OpBranch: public LIR_Op { ++- friend class LIR_OpVisitState; ++- ++- private: ++- LIR_Condition _cond; ++- Label* _label; ++- BlockBegin* _block; // if this is a branch to a block, this is the block ++- BlockBegin* _ublock; // if this is a float-branch, this is the unorderd block ++- CodeStub* _stub; // if this is a branch to a stub, this is the stub ++- ++- public: ++- LIR_OpBranch(LIR_Condition cond, Label* lbl) ++- : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL) ++- , _cond(cond) ++- , _label(lbl) ++- , _block(NULL) ++- , _ublock(NULL) ++- , _stub(NULL) { } ++- ++- LIR_OpBranch(LIR_Condition cond, BlockBegin* block); ++- LIR_OpBranch(LIR_Condition cond, CodeStub* stub); ++- ++- // for unordered comparisons ++- LIR_OpBranch(LIR_Condition cond, BlockBegin* block, BlockBegin* ublock); ++- ++- LIR_Condition cond() const { return _cond; } ++- Label* label() const { return _label; } ++- BlockBegin* block() const { return _block; } ++- BlockBegin* ublock() const { return _ublock; } ++- CodeStub* stub() const { return 
_stub; } ++- ++- void change_block(BlockBegin* b); ++- void change_ublock(BlockBegin* b); ++- void negate_cond(); ++- ++- virtual void emit_code(LIR_Assembler* masm); ++- virtual LIR_OpBranch* as_OpBranch() { return this; } ++- virtual void print_instr(outputStream* out) const PRODUCT_RETURN; ++-}; ++- ++ class LIR_OpReturn: public LIR_Op1 { ++ friend class LIR_OpVisitState; ++ ++@@ -1612,19 +1595,19 @@ class LIR_Op2: public LIR_Op { ++ void verify() const; ++ ++ public: ++- LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL) +++ LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL) ++ : LIR_Op(code, LIR_OprFact::illegalOpr, info) ++ , _fpu_stack_size(0) ++ , _opr1(opr1) ++ , _opr2(opr2) ++- , _type(T_ILLEGAL) +++ , _type(type) ++ , _tmp1(LIR_OprFact::illegalOpr) ++ , _tmp2(LIR_OprFact::illegalOpr) ++ , _tmp3(LIR_OprFact::illegalOpr) ++ , _tmp4(LIR_OprFact::illegalOpr) ++ , _tmp5(LIR_OprFact::illegalOpr) ++ , _condition(condition) { ++- assert(code == lir_cmp || code == lir_assert, "code check"); +++ assert(code == lir_cmp || code == lir_assert RISCV_ONLY(|| code == lir_branch || code == lir_cond_float_branch), "code check"); ++ } ++ ++ LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) ++@@ -1656,7 +1639,7 @@ class LIR_Op2: public LIR_Op { ++ , _tmp4(LIR_OprFact::illegalOpr) ++ , _tmp5(LIR_OprFact::illegalOpr) ++ , _condition(lir_cond_unknown) { ++- assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check"); +++ assert(code != lir_cmp && RISCV_ONLY(code != lir_branch && code != lir_cond_float_branch &&) is_in_range(code, begin_op2, end_op2), "code check"); ++ } ++ ++ LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr, ++@@ -1672,7 +1655,7 @@ class LIR_Op2: public LIR_Op { ++ , _tmp4(tmp4) ++ , _tmp5(tmp5) ++ , _condition(lir_cond_unknown) { ++- assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check"); +++ assert(code != lir_cmp && RISCV_ONLY(code != lir_branch && code != lir_cond_float_branch &&) is_in_range(code, begin_op2, end_op2), "code check"); ++ } ++ ++ LIR_Opr in_opr1() const { return _opr1; } ++@@ -1684,10 +1667,18 @@ class LIR_Op2: public LIR_Op { ++ LIR_Opr tmp4_opr() const { return _tmp4; } ++ LIR_Opr tmp5_opr() const { return _tmp5; } ++ LIR_Condition condition() const { +++#ifdef RISCV +++ assert(code() == lir_cmp || code() == lir_branch || code() == lir_cond_float_branch || code() == lir_assert, "only valid for branch and assert"); return _condition; +++#else ++ assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); return _condition; +++#endif ++ } ++ void set_condition(LIR_Condition condition) { +++#ifdef RISCV +++ assert(code() == lir_cmp || code() == lir_branch || code() == lir_cond_float_branch, "only valid for branch"); _condition = condition; +++#else ++ assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); _condition = condition; +++#endif ++ } ++ ++ void set_fpu_stack_size(int size) { _fpu_stack_size = size; } ++@@ -1701,6 +1692,62 @@ class LIR_Op2: public LIR_Op { ++ virtual void print_instr(outputStream* out) const PRODUCT_RETURN; ++ }; ++ +++#ifdef RISCV +++class LIR_OpBranch: public LIR_Op2 { +++#else +++class LIR_OpBranch: public LIR_Op { +++#endif +++ friend class 
LIR_OpVisitState; +++ +++ private: +++#ifndef RISCV +++ LIR_Condition _cond; +++#endif +++ Label* _label; +++ BlockBegin* _block; // if this is a branch to a block, this is the block +++ BlockBegin* _ublock; // if this is a float-branch, this is the unordered block +++ CodeStub* _stub; // if this is a branch to a stub, this is the stub +++ +++ public: +++ LIR_OpBranch(LIR_Condition cond, Label* lbl) +++#ifdef RISCV +++ : LIR_Op2(lir_branch, cond, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL) +++#else +++ : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL) +++ , _cond(cond) +++#endif +++ , _label(lbl) +++ , _block(NULL) +++ , _ublock(NULL) +++ , _stub(NULL) { } +++ +++ LIR_OpBranch(LIR_Condition cond, BlockBegin* block); +++ LIR_OpBranch(LIR_Condition cond, CodeStub* stub); +++ +++ // for unordered comparisons +++ LIR_OpBranch(LIR_Condition cond, BlockBegin* block, BlockBegin* ublock); +++ +++#ifdef RISCV +++ LIR_Condition cond() const { return condition(); } +++ void set_cond(LIR_Condition cond) { set_condition(cond); } +++#else +++ LIR_Condition cond() const { return _cond; } +++ void set_cond(LIR_Condition cond) { _cond = cond; } +++#endif +++ Label* label() const { return _label; } +++ BlockBegin* block() const { return _block; } +++ BlockBegin* ublock() const { return _ublock; } +++ CodeStub* stub() const { return _stub; } +++ +++ void change_block(BlockBegin* b); +++ void change_ublock(BlockBegin* b); +++ void negate_cond(); +++ +++ virtual void emit_code(LIR_Assembler* masm); +++ virtual LIR_OpBranch* as_OpBranch() { return this; } +++ virtual void print_instr(outputStream* out) const PRODUCT_RETURN; +++}; +++ ++ class LIR_OpAllocArray : public LIR_Op { ++ friend class LIR_OpVisitState; ++ ++@@ -1764,6 +1811,65 @@ class LIR_Op3: public LIR_Op { ++ virtual void print_instr(outputStream* out) const PRODUCT_RETURN; ++ }; ++ +++#ifdef RISCV +++class LIR_Op4: public LIR_Op { +++ friend class LIR_OpVisitState; +++ protected: +++ LIR_Opr _opr1; +++ LIR_Opr _opr2; +++ LIR_Opr _opr3; +++ LIR_Opr _opr4; +++ BasicType _type; +++ LIR_Opr _tmp1; +++ LIR_Opr _tmp2; +++ LIR_Opr _tmp3; +++ LIR_Opr _tmp4; +++ LIR_Opr _tmp5; +++ LIR_Condition _condition; +++ +++ public: +++ LIR_Op4(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr opr4, +++ LIR_Opr result, BasicType type) +++ : LIR_Op(code, result, NULL) +++ , _opr1(opr1) +++ , _opr2(opr2) +++ , _opr3(opr3) +++ , _opr4(opr4) +++ , _type(type) +++ , _tmp1(LIR_OprFact::illegalOpr) +++ , _tmp2(LIR_OprFact::illegalOpr) +++ , _tmp3(LIR_OprFact::illegalOpr) +++ , _tmp4(LIR_OprFact::illegalOpr) +++ , _tmp5(LIR_OprFact::illegalOpr) +++ , _condition(condition) { +++ assert(code == lir_cmove, "code check"); +++ assert(type != T_ILLEGAL, "cmove should have type"); +++ } +++ +++ LIR_Opr in_opr1() const { return _opr1; } +++ LIR_Opr in_opr2() const { return _opr2; } +++ LIR_Opr in_opr3() const { return _opr3; } +++ LIR_Opr in_opr4() const { return _opr4; } +++ BasicType type() const { return _type; } +++ LIR_Opr tmp1_opr() const { return _tmp1; } +++ LIR_Opr tmp2_opr() const { return _tmp2; } +++ LIR_Opr tmp3_opr() const { return _tmp3; } +++ LIR_Opr tmp4_opr() const { return _tmp4; } +++ LIR_Opr tmp5_opr() const { return _tmp5; } +++ +++ LIR_Condition condition() const { return _condition; } +++ void set_condition(LIR_Condition condition) { _condition = condition; } +++ +++ void set_in_opr1(LIR_Opr opr) { _opr1 = opr; } +++ void set_in_opr2(LIR_Opr opr) { _opr2 = opr; } +++ void 
set_in_opr3(LIR_Opr opr) { _opr3 = opr; } +++ void set_in_opr4(LIR_Opr opr) { _opr4 = opr; } +++ virtual void emit_code(LIR_Assembler* masm); +++ virtual LIR_Op4* as_Op4() { return this; } +++ +++ virtual void print_instr(outputStream* out) const PRODUCT_RETURN; +++}; +++#endif ++ ++ //-------------------------------- ++ class LabelObj: public CompilationResourceObj { ++@@ -1986,6 +2092,10 @@ class LIR_List: public CompilationResour ++ const char * _file; ++ int _line; ++ #endif +++#ifdef RISCV +++ LIR_Opr _cmp_opr1; +++ LIR_Opr _cmp_opr2; +++#endif ++ ++ public: ++ void append(LIR_Op* op) { ++@@ -1998,6 +2108,12 @@ class LIR_List: public CompilationResour ++ } ++ #endif // PRODUCT ++ +++#ifdef RISCV +++ set_cmp_oprs(op); +++ // lir_cmp set cmp oprs only on riscv +++ if (op->code() == lir_cmp) return; +++#endif +++ ++ _operations.append(op); ++ ++ #ifdef ASSERT ++@@ -2014,6 +2130,10 @@ class LIR_List: public CompilationResour ++ void set_file_and_line(const char * file, int line); ++ #endif ++ +++#ifdef RISCV +++ void set_cmp_oprs(LIR_Op* op); +++#endif +++ ++ //---------- accessors --------------- ++ LIR_OpList* instructions_list() { return &_operations; } ++ int length() const { return _operations.length(); } ++@@ -2133,9 +2253,16 @@ class LIR_List: public CompilationResour ++ void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info); ++ void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info); ++ +++#ifdef RISCV +++ void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type, +++ LIR_Opr cmp_opr1 = LIR_OprFact::illegalOpr, LIR_Opr cmp_opr2 = LIR_OprFact::illegalOpr) { +++ append(new LIR_Op4(lir_cmove, condition, src1, src2, cmp_opr1, cmp_opr2, dst, type)); +++ } +++#else ++ void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) { ++ append(new LIR_Op2(lir_cmove, condition, src1, src2, dst, type)); ++ } +++#endif ++ ++ void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, ++ LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr); ++--- a/src/hotspot/share/c1/c1_LIRAssembler.cpp +++++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp ++@@ -691,9 +691,11 @@ void LIR_Assembler::emit_op2(LIR_Op2* op ++ comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op); ++ break; ++ +++#ifndef RISCV ++ case lir_cmove: ++ cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type()); ++ break; +++#endif ++ ++ case lir_shl: ++ case lir_shr: ++@@ -756,6 +758,19 @@ void LIR_Assembler::emit_op2(LIR_Op2* op ++ } ++ } ++ +++#ifdef RISCV +++void LIR_Assembler::emit_op4(LIR_Op4* op) { +++ switch(op->code()) { +++ case lir_cmove: +++ cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4()); +++ break; +++ +++ default: +++ Unimplemented(); +++ break; +++ } +++} +++#endif ++ ++ void LIR_Assembler::build_frame() { ++ _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes()); ++--- a/src/hotspot/share/c1/c1_LIRAssembler.hpp +++++ b/src/hotspot/share/c1/c1_LIRAssembler.hpp ++@@ -186,6 +186,9 @@ class LIR_Assembler: public CompilationR ++ void emit_op1(LIR_Op1* op); ++ void emit_op2(LIR_Op2* op); ++ void emit_op3(LIR_Op3* op); +++#ifdef RISCV +++ void emit_op4(LIR_Op4* op); +++#endif ++ void emit_opBranch(LIR_OpBranch* op); ++ void emit_opLabel(LIR_OpLabel* op); ++ void emit_arraycopy(LIR_OpArrayCopy* op); ++@@ -218,8 +221,12 @@ class LIR_Assembler: public 
CompilationR ++ void volatile_move_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info); ++ void comp_mem_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info); // info set for null exceptions ++ void comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr result, LIR_Op2* op); +++#ifdef RISCV +++ void cmove(LIR_Condition code, LIR_Opr left, LIR_Opr right, LIR_Opr result, BasicType type, +++ LIR_Opr cmp_opr1 = LIR_OprFact::illegalOpr, LIR_Opr cmp_opr2 = LIR_OprFact::illegalOpr); +++#else ++ void cmove(LIR_Condition code, LIR_Opr left, LIR_Opr right, LIR_Opr result, BasicType type); ++- +++#endif ++ void call( LIR_OpJavaCall* op, relocInfo::relocType rtype); ++ void ic_call( LIR_OpJavaCall* op); ++ void vtable_call( LIR_OpJavaCall* op); ++--- a/src/hotspot/share/c1/c1_LinearScan.cpp +++++ b/src/hotspot/share/c1/c1_LinearScan.cpp ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -1240,8 +1240,13 @@ void LinearScan::add_register_hints(LIR_ ++ break; ++ } ++ case lir_cmove: { +++#ifdef RISCV +++ assert(op->as_Op4() != NULL, "lir_cmove must be LIR_Op4"); +++ LIR_Op4* cmove = (LIR_Op4*)op; +++#else ++ assert(op->as_Op2() != NULL, "lir_cmove must be LIR_Op2"); ++ LIR_Op2* cmove = (LIR_Op2*)op; +++#endif ++ ++ LIR_Opr move_from = cmove->in_opr1(); ++ LIR_Opr move_to = cmove->result_opr(); ++@@ -3138,6 +3143,9 @@ void LinearScan::do_linear_scan() { ++ } ++ } ++ +++#ifndef RISCV +++ // Disable these optimizations on riscv temporarily, because it does not +++ // work when the comparison operands are bound to branches or cmoves. ++ { TIME_LINEAR_SCAN(timer_optimize_lir); ++ ++ EdgeMoveOptimizer::optimize(ir()->code()); ++@@ -3145,6 +3153,7 @@ void LinearScan::do_linear_scan() { ++ // check that cfg is still correct after optimizations ++ ir()->verify(); ++ } +++#endif ++ ++ NOT_PRODUCT(print_lir(1, "Before Code Generation", false)); ++ NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_final)); ++@@ -6368,14 +6377,23 @@ void ControlFlowOptimizer::delete_unnece ++ // There might be a cmove inserted for profiling which depends on the same ++ // compare. If we change the condition of the respective compare, we have ++ // to take care of this cmove as well. +++#ifdef RISCV +++ LIR_Op4* prev_cmove = NULL; +++#else ++ LIR_Op2* prev_cmove = NULL; +++#endif ++ ++ for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) { ++ prev_op = instructions->at(j); ++ // check for the cmove ++ if (prev_op->code() == lir_cmove) { +++#ifdef RISCV +++ assert(prev_op->as_Op4() != NULL, "cmove must be of type LIR_Op4"); +++ prev_cmove = (LIR_Op4*)prev_op; +++#else ++ assert(prev_op->as_Op2() != NULL, "cmove must be of type LIR_Op2"); ++ prev_cmove = (LIR_Op2*)prev_op; +++#endif ++ assert(prev_branch->cond() == prev_cmove->condition(), "should be the same"); ++ } ++ if (prev_op->code() == lir_cmp) { ++--- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp +++++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved. +++ * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -35,7 +35,7 @@ ++ #include "utilities/defaultStream.hpp" ++ ++ void ShenandoahArguments::initialize() { ++-#if !(defined AARCH64 || defined AMD64 || defined IA32 || defined PPC64) +++#if !(defined AARCH64 || defined AMD64 || defined IA32 || defined PPC64 || defined RISCV64) ++ vm_exit_during_initialization("Shenandoah GC is not supported on this platform."); ++ #endif ++ ++--- a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp +++++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -94,7 +94,11 @@ private: ++ ++ public: ++ LIR_OpZLoadBarrierTest(LIR_Opr opr) : +++#ifdef RISCV +++ LIR_Op(lir_zloadbarrier_test, LIR_OprFact::illegalOpr, NULL), +++#else ++ LIR_Op(), +++#endif ++ _opr(opr) {} ++ ++ virtual void visit(LIR_OpVisitState* state) { ++--- a/src/hotspot/share/jfr/utilities/jfrBigEndian.hpp +++++ b/src/hotspot/share/jfr/utilities/jfrBigEndian.hpp ++@@ -102,7 +102,7 @@ inline T JfrBigEndian::read_unaligned(co ++ inline bool JfrBigEndian::platform_supports_unaligned_reads(void) { ++ #if defined(IA32) || defined(AMD64) || defined(PPC) || defined(S390) ++ return true; ++-#elif defined(ARM) || defined(AARCH64) +++#elif defined(ARM) || defined(AARCH64) || defined(RISCV) ++ return false; ++ #else ++ #warning "Unconfigured platform" ++--- a/src/hotspot/share/opto/regmask.hpp +++++ b/src/hotspot/share/opto/regmask.hpp ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -99,7 +99,7 @@ class RegMask { ++ // requirement is internal to the allocator, and independent of any ++ // particular platform. ++ enum { SlotsPerLong = 2, ++- SlotsPerVecA = 8, +++ SlotsPerVecA = RISCV_ONLY(4) NOT_RISCV(8), ++ SlotsPerVecS = 1, ++ SlotsPerVecD = 2, ++ SlotsPerVecX = 4, ++--- a/src/hotspot/share/runtime/abstract_vm_version.cpp +++++ b/src/hotspot/share/runtime/abstract_vm_version.cpp ++@@ -183,7 +183,8 @@ const char* Abstract_VM_Version::jre_rel ++ AMD64_ONLY("amd64") \ ++ IA32_ONLY("x86") \ ++ IA64_ONLY("ia64") \ ++- S390_ONLY("s390") +++ S390_ONLY("s390") \ +++ RISCV64_ONLY("riscv64") ++ #endif // !ZERO ++ #endif // !CPU ++ ++--- a/src/hotspot/share/runtime/thread.inline.hpp +++++ b/src/hotspot/share/runtime/thread.inline.hpp ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++@@ -132,7 +132,7 @@ inline void JavaThread::set_pending_asyn ++ } ++ ++ inline JavaThreadState JavaThread::thread_state() const { ++-#if defined(PPC64) || defined (AARCH64) +++#if defined(PPC64) || defined (AARCH64) || defined(RISCV64) ++ // Use membars when accessing volatile _thread_state. See ++ // Threads::create_vm() for size checks. 
++ return (JavaThreadState) Atomic::load_acquire((volatile jint*)&_thread_state); ++@@ -144,7 +144,7 @@ inline JavaThreadState JavaThread::threa ++ inline void JavaThread::set_thread_state(JavaThreadState s) { ++ assert(current_or_null() == NULL || current_or_null() == this, ++ "state change should only be called by the current thread"); ++-#if defined(PPC64) || defined (AARCH64) +++#if defined(PPC64) || defined (AARCH64) || defined(RISCV64) ++ // Use membars when accessing volatile _thread_state. See ++ // Threads::create_vm() for size checks. ++ Atomic::release_store((volatile jint*)&_thread_state, (jint)s); ++--- a/src/hotspot/share/utilities/macros.hpp +++++ b/src/hotspot/share/utilities/macros.hpp ++@@ -553,6 +553,32 @@ ++ ++ #define MACOS_AARCH64_ONLY(x) MACOS_ONLY(AARCH64_ONLY(x)) ++ +++#if defined(RISCV32) || defined(RISCV64) +++#define RISCV +++#define RISCV_ONLY(code) code +++#define NOT_RISCV(code) +++#else +++#undef RISCV +++#define RISCV_ONLY(code) +++#define NOT_RISCV(code) code +++#endif +++ +++#ifdef RISCV32 +++#define RISCV32_ONLY(code) code +++#define NOT_RISCV32(code) +++#else +++#define RISCV32_ONLY(code) +++#define NOT_RISCV32(code) code +++#endif +++ +++#ifdef RISCV64 +++#define RISCV64_ONLY(code) code +++#define NOT_RISCV64(code) +++#else +++#define RISCV64_ONLY(code) +++#define NOT_RISCV64(code) code +++#endif +++ ++ #ifdef VM_LITTLE_ENDIAN ++ #define LITTLE_ENDIAN_ONLY(code) code ++ #define BIG_ENDIAN_ONLY(code) ++--- a/src/jdk.hotspot.agent/linux/native/libsaproc/LinuxDebuggerLocal.cpp +++++ b/src/jdk.hotspot.agent/linux/native/libsaproc/LinuxDebuggerLocal.cpp ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2019, 2021, NTT DATA. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++@@ -60,6 +60,10 @@ ++ #include "sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext.h" ++ #endif ++ +++#ifdef riscv64 +++#include "sun_jvm_hotspot_debugger_riscv64_RISCV64ThreadContext.h" +++#endif +++ ++ class AutoJavaString { ++ JNIEnv* m_env; ++ jstring m_str; ++@@ -408,7 +412,7 @@ JNIEXPORT jbyteArray JNICALL Java_sun_jv ++ return (err == PS_OK)? 
array : 0; ++ } ++ ++-#if defined(i586) || defined(amd64) || defined(ppc64) || defined(ppc64le) || defined(aarch64) +++#if defined(i586) || defined(amd64) || defined(ppc64) || defined(ppc64le) || defined(aarch64) || defined(riscv64) ++ extern "C" ++ JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_getThreadIntegerRegisterSet0 ++ (JNIEnv *env, jobject this_obj, jint lwp_id) { ++@@ -440,6 +444,9 @@ JNIEXPORT jlongArray JNICALL Java_sun_jv ++ #ifdef aarch64 ++ #define NPRGREG sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext_NPRGREG ++ #endif +++#ifdef riscv64 +++#define NPRGREG sun_jvm_hotspot_debugger_riscv64_RISCV64ThreadContext_NPRGREG +++#endif ++ #if defined(ppc64) || defined(ppc64le) ++ #define NPRGREG sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext_NPRGREG ++ #endif ++@@ -516,6 +523,44 @@ JNIEXPORT jlongArray JNICALL Java_sun_jv ++ } ++ #endif /* aarch64 */ ++ +++#if defined(riscv64) +++#define REG_INDEX(reg) sun_jvm_hotspot_debugger_riscv64_RISCV64ThreadContext_##reg +++ +++ regs[REG_INDEX(PC)] = gregs.pc; +++ regs[REG_INDEX(LR)] = gregs.ra; +++ regs[REG_INDEX(SP)] = gregs.sp; +++ regs[REG_INDEX(R3)] = gregs.gp; +++ regs[REG_INDEX(R4)] = gregs.tp; +++ regs[REG_INDEX(R5)] = gregs.t0; +++ regs[REG_INDEX(R6)] = gregs.t1; +++ regs[REG_INDEX(R7)] = gregs.t2; +++ regs[REG_INDEX(R8)] = gregs.s0; +++ regs[REG_INDEX(R9)] = gregs.s1; +++ regs[REG_INDEX(R10)] = gregs.a0; +++ regs[REG_INDEX(R11)] = gregs.a1; +++ regs[REG_INDEX(R12)] = gregs.a2; +++ regs[REG_INDEX(R13)] = gregs.a3; +++ regs[REG_INDEX(R14)] = gregs.a4; +++ regs[REG_INDEX(R15)] = gregs.a5; +++ regs[REG_INDEX(R16)] = gregs.a6; +++ regs[REG_INDEX(R17)] = gregs.a7; +++ regs[REG_INDEX(R18)] = gregs.s2; +++ regs[REG_INDEX(R19)] = gregs.s3; +++ regs[REG_INDEX(R20)] = gregs.s4; +++ regs[REG_INDEX(R21)] = gregs.s5; +++ regs[REG_INDEX(R22)] = gregs.s6; +++ regs[REG_INDEX(R23)] = gregs.s7; +++ regs[REG_INDEX(R24)] = gregs.s8; +++ regs[REG_INDEX(R25)] = gregs.s9; +++ regs[REG_INDEX(R26)] = gregs.s10; +++ regs[REG_INDEX(R27)] = gregs.s11; +++ regs[REG_INDEX(R28)] = gregs.t3; +++ regs[REG_INDEX(R29)] = gregs.t4; +++ regs[REG_INDEX(R30)] = gregs.t5; +++ regs[REG_INDEX(R31)] = gregs.t6; +++ +++#endif /* riscv64 */ +++ ++ #if defined(ppc64) || defined(ppc64le) ++ #define REG_INDEX(reg) sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext_##reg ++ ++--- a/src/jdk.hotspot.agent/linux/native/libsaproc/libproc.h +++++ b/src/jdk.hotspot.agent/linux/native/libsaproc/libproc.h ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ *
++ * This code is free software; you can redistribute it and/or modify it
++@@ -43,6 +43,8 @@
++ #elif defined(arm)
++ #include <asm/ptrace.h>
++ #define user_regs_struct pt_regs
+++#elif defined(riscv64)
+++#include <asm/ptrace.h>
++ #endif
++
++ // This C bool type must be int for compatibility with Linux calls and
++--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotAgent.java
+++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotAgent.java
++@@ -36,6 +36,7 @@ import sun.jvm.hotspot.debugger.MachineD
++ import sun.jvm.hotspot.debugger.MachineDescriptionAMD64;
++ import sun.jvm.hotspot.debugger.MachineDescriptionPPC64;
++ import sun.jvm.hotspot.debugger.MachineDescriptionAArch64;
+++import sun.jvm.hotspot.debugger.MachineDescriptionRISCV64;
++ import sun.jvm.hotspot.debugger.MachineDescriptionIntelX86;
++ import sun.jvm.hotspot.debugger.NoSuchSymbolException;
++ import sun.jvm.hotspot.debugger.bsd.BsdDebuggerLocal;
++@@ -569,6 +570,8 @@ public class HotSpotAgent {
++ machDesc = new MachineDescriptionPPC64();
++ } else if (cpu.equals("aarch64")) {
++ machDesc = new MachineDescriptionAArch64();
+++ } else if (cpu.equals("riscv64")) {
+++ machDesc = new MachineDescriptionRISCV64();
++ } else {
++ try {
++ machDesc = (MachineDescription)
++--- /dev/null
+++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionRISCV64.java
++@@ -0,0 +1,40 @@
+++/*
+++ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++package sun.jvm.hotspot.debugger;
+++
+++public class MachineDescriptionRISCV64 extends MachineDescriptionTwosComplement implements MachineDescription {
+++ public long getAddressSize() {
+++ return 8;
+++ }
+++
+++ public boolean isLP64() {
+++ return true;
+++ }
+++
+++ public boolean isBigEndian() {
+++ return false;
+++ }
+++}
++--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java
+++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java
++@@ -1,5 +1,5 @@
++ /*
++- * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
++ * Copyright (c) 2015, Red Hat Inc.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ * ++@@ -33,11 +33,13 @@ import sun.jvm.hotspot.debugger.cdbg.*; ++ import sun.jvm.hotspot.debugger.x86.*; ++ import sun.jvm.hotspot.debugger.amd64.*; ++ import sun.jvm.hotspot.debugger.aarch64.*; +++import sun.jvm.hotspot.debugger.riscv64.*; ++ import sun.jvm.hotspot.debugger.ppc64.*; ++ import sun.jvm.hotspot.debugger.linux.x86.*; ++ import sun.jvm.hotspot.debugger.linux.amd64.*; ++ import sun.jvm.hotspot.debugger.linux.ppc64.*; ++ import sun.jvm.hotspot.debugger.linux.aarch64.*; +++import sun.jvm.hotspot.debugger.linux.riscv64.*; ++ import sun.jvm.hotspot.utilities.*; ++ ++ class LinuxCDebugger implements CDebugger { ++@@ -105,7 +107,14 @@ class LinuxCDebugger implements CDebugge ++ Address pc = context.getRegisterAsAddress(AARCH64ThreadContext.PC); ++ if (pc == null) return null; ++ return new LinuxAARCH64CFrame(dbg, fp, pc); ++- } else { +++ } else if (cpu.equals("riscv64")) { +++ RISCV64ThreadContext context = (RISCV64ThreadContext) thread.getContext(); +++ Address fp = context.getRegisterAsAddress(RISCV64ThreadContext.FP); +++ if (fp == null) return null; +++ Address pc = context.getRegisterAsAddress(RISCV64ThreadContext.PC); +++ if (pc == null) return null; +++ return new LinuxRISCV64CFrame(dbg, fp, pc); +++ } else { ++ // Runtime exception thrown by LinuxThreadContextFactory if unknown cpu ++ ThreadContext context = (ThreadContext) thread.getContext(); ++ return context.getTopFrame(dbg); ++--- /dev/null +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/riscv64/LinuxRISCV64CFrame.java ++@@ -0,0 +1,90 @@ +++/* +++ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, Red Hat Inc. +++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++package sun.jvm.hotspot.debugger.linux.riscv64; +++ +++import sun.jvm.hotspot.debugger.*; +++import sun.jvm.hotspot.debugger.riscv64.*; +++import sun.jvm.hotspot.debugger.linux.*; +++import sun.jvm.hotspot.debugger.cdbg.*; +++import sun.jvm.hotspot.debugger.cdbg.basic.*; +++ +++public final class LinuxRISCV64CFrame extends BasicCFrame { +++ private static final int C_FRAME_LINK_OFFSET = -2; +++ private static final int C_FRAME_RETURN_ADDR_OFFSET = -1; +++ +++ public LinuxRISCV64CFrame(LinuxDebugger dbg, Address fp, Address pc) { +++ super(dbg.getCDebugger()); +++ this.fp = fp; +++ this.pc = pc; +++ this.dbg = dbg; +++ } +++ +++ // override base class impl to avoid ELF parsing +++ public ClosestSymbol closestSymbolToPC() { +++ // try native lookup in debugger. +++ return dbg.lookup(dbg.getAddressValue(pc())); +++ } +++ +++ public Address pc() { +++ return pc; +++ } +++ +++ public Address localVariableBase() { +++ return fp; +++ } +++ +++ public CFrame sender(ThreadProxy thread) { +++ RISCV64ThreadContext context = (RISCV64ThreadContext) thread.getContext(); +++ Address rsp = context.getRegisterAsAddress(RISCV64ThreadContext.SP); +++ +++ if ((fp == null) || fp.lessThan(rsp)) { +++ return null; +++ } +++ +++ // Check alignment of fp +++ if (dbg.getAddressValue(fp) % (2 * ADDRESS_SIZE) != 0) { +++ return null; +++ } +++ +++ Address nextFP = fp.getAddressAt(C_FRAME_LINK_OFFSET * ADDRESS_SIZE); +++ if (nextFP == null || nextFP.lessThanOrEqual(fp)) { +++ return null; +++ } +++ Address nextPC = fp.getAddressAt(C_FRAME_RETURN_ADDR_OFFSET * ADDRESS_SIZE); +++ if (nextPC == null) { +++ return null; +++ } +++ return new LinuxRISCV64CFrame(dbg, nextFP, nextPC); +++ } +++ +++ // package/class internals only +++ private static final int ADDRESS_SIZE = 8; +++ private Address pc; +++ private Address sp; +++ private Address fp; +++ private LinuxDebugger dbg; +++} ++--- /dev/null +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/riscv64/LinuxRISCV64ThreadContext.java ++@@ -0,0 +1,48 @@ +++/* +++ * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, Red Hat Inc. +++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++package sun.jvm.hotspot.debugger.linux.riscv64; +++ +++import sun.jvm.hotspot.debugger.*; +++import sun.jvm.hotspot.debugger.riscv64.*; +++import sun.jvm.hotspot.debugger.linux.*; +++ +++public class LinuxRISCV64ThreadContext extends RISCV64ThreadContext { +++ private LinuxDebugger debugger; +++ +++ public LinuxRISCV64ThreadContext(LinuxDebugger debugger) { +++ super(); +++ this.debugger = debugger; +++ } +++ +++ public void setRegisterAsAddress(int index, Address value) { +++ setRegister(index, debugger.getAddressValue(value)); +++ } +++ +++ public Address getRegisterAsAddress(int index) { +++ return debugger.newAddress(getRegister(index)); +++ } +++} ++--- /dev/null +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/riscv64/ProcRISCV64Thread.java ++@@ -0,0 +1,88 @@ +++/* +++ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, Red Hat Inc. +++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++package sun.jvm.hotspot.debugger.proc.riscv64; +++ +++import sun.jvm.hotspot.debugger.*; +++import sun.jvm.hotspot.debugger.riscv64.*; +++import sun.jvm.hotspot.debugger.proc.*; +++import sun.jvm.hotspot.utilities.*; +++ +++public class ProcRISCV64Thread implements ThreadProxy { +++ private ProcDebugger debugger; +++ private int id; +++ +++ public ProcRISCV64Thread(ProcDebugger debugger, Address addr) { +++ this.debugger = debugger; +++ +++ // FIXME: the size here should be configurable. However, making it +++ // so would produce a dependency on the "types" package from the +++ // debugger package, which is not desired. 
+++ this.id = (int) addr.getCIntegerAt(0, 4, true); +++ } +++ +++ public ProcRISCV64Thread(ProcDebugger debugger, long id) { +++ this.debugger = debugger; +++ this.id = (int) id; +++ } +++ +++ public ThreadContext getContext() throws IllegalThreadStateException { +++ ProcRISCV64ThreadContext context = new ProcRISCV64ThreadContext(debugger); +++ long[] regs = debugger.getThreadIntegerRegisterSet(id); +++ if (Assert.ASSERTS_ENABLED) { +++ Assert.that(regs.length == RISCV64ThreadContext.NPRGREG, "size mismatch"); +++ } +++ for (int i = 0; i < regs.length; i++) { +++ context.setRegister(i, regs[i]); +++ } +++ return context; +++ } +++ +++ public boolean canSetContext() throws DebuggerException { +++ return false; +++ } +++ +++ public void setContext(ThreadContext context) +++ throws IllegalThreadStateException, DebuggerException { +++ throw new DebuggerException("Unimplemented"); +++ } +++ +++ public String toString() { +++ return "t@" + id; +++ } +++ +++ public boolean equals(Object obj) { +++ if ((obj == null) || !(obj instanceof ProcRISCV64Thread)) { +++ return false; +++ } +++ +++ return (((ProcRISCV64Thread) obj).id == id); +++ } +++ +++ public int hashCode() { +++ return id; +++ } +++} ++--- /dev/null +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/riscv64/ProcRISCV64ThreadContext.java ++@@ -0,0 +1,48 @@ +++/* +++ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, Red Hat Inc. +++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++package sun.jvm.hotspot.debugger.proc.riscv64; +++ +++import sun.jvm.hotspot.debugger.*; +++import sun.jvm.hotspot.debugger.riscv64.*; +++import sun.jvm.hotspot.debugger.proc.*; +++ +++public class ProcRISCV64ThreadContext extends RISCV64ThreadContext { +++ private ProcDebugger debugger; +++ +++ public ProcRISCV64ThreadContext(ProcDebugger debugger) { +++ super(); +++ this.debugger = debugger; +++ } +++ +++ public void setRegisterAsAddress(int index, Address value) { +++ setRegister(index, debugger.getAddressValue(value)); +++ } +++ +++ public Address getRegisterAsAddress(int index) { +++ return debugger.newAddress(getRegister(index)); +++ } +++} ++--- /dev/null +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/riscv64/ProcRISCV64ThreadFactory.java ++@@ -0,0 +1,46 @@ +++/* +++ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, Red Hat Inc. 
+++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++package sun.jvm.hotspot.debugger.proc.riscv64; +++ +++import sun.jvm.hotspot.debugger.*; +++import sun.jvm.hotspot.debugger.proc.*; +++ +++public class ProcRISCV64ThreadFactory implements ProcThreadFactory { +++ private ProcDebugger debugger; +++ +++ public ProcRISCV64ThreadFactory(ProcDebugger debugger) { +++ this.debugger = debugger; +++ } +++ +++ public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) { +++ return new ProcRISCV64Thread(debugger, threadIdentifierAddr); +++ } +++ +++ public ThreadProxy createThreadWrapper(long id) { +++ return new ProcRISCV64Thread(debugger, id); +++ } +++} ++--- /dev/null +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/remote/riscv64/RemoteRISCV64Thread.java ++@@ -0,0 +1,55 @@ +++/* +++ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, Red Hat Inc. +++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++package sun.jvm.hotspot.debugger.remote.riscv64; +++ +++import sun.jvm.hotspot.debugger.*; +++import sun.jvm.hotspot.debugger.riscv64.*; +++import sun.jvm.hotspot.debugger.remote.*; +++import sun.jvm.hotspot.utilities.*; +++ +++public class RemoteRISCV64Thread extends RemoteThread { +++ public RemoteRISCV64Thread(RemoteDebuggerClient debugger, Address addr) { +++ super(debugger, addr); +++ } +++ +++ public RemoteRISCV64Thread(RemoteDebuggerClient debugger, long id) { +++ super(debugger, id); +++ } +++ +++ public ThreadContext getContext() throws IllegalThreadStateException { +++ RemoteRISCV64ThreadContext context = new RemoteRISCV64ThreadContext(debugger); +++ long[] regs = (addr != null)? debugger.getThreadIntegerRegisterSet(addr) : +++ debugger.getThreadIntegerRegisterSet(id); +++ if (Assert.ASSERTS_ENABLED) { +++ Assert.that(regs.length == RISCV64ThreadContext.NPRGREG, "size of register set must match"); +++ } +++ for (int i = 0; i < regs.length; i++) { +++ context.setRegister(i, regs[i]); +++ } +++ return context; +++ } +++} ++--- /dev/null +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/remote/riscv64/RemoteRISCV64ThreadContext.java ++@@ -0,0 +1,48 @@ +++/* +++ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, Red Hat Inc. +++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++package sun.jvm.hotspot.debugger.remote.riscv64; +++ +++import sun.jvm.hotspot.debugger.*; +++import sun.jvm.hotspot.debugger.riscv64.*; +++import sun.jvm.hotspot.debugger.remote.*; +++ +++public class RemoteRISCV64ThreadContext extends RISCV64ThreadContext { +++ private RemoteDebuggerClient debugger; +++ +++ public RemoteRISCV64ThreadContext(RemoteDebuggerClient debugger) { +++ super(); +++ this.debugger = debugger; +++ } +++ +++ public void setRegisterAsAddress(int index, Address value) { +++ setRegister(index, debugger.getAddressValue(value)); +++ } +++ +++ public Address getRegisterAsAddress(int index) { +++ return debugger.newAddress(getRegister(index)); +++ } +++} ++--- /dev/null +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/remote/riscv64/RemoteRISCV64ThreadFactory.java ++@@ -0,0 +1,46 @@ +++/* +++ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, Red Hat Inc. +++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. 
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++package sun.jvm.hotspot.debugger.remote.riscv64;
+++
+++import sun.jvm.hotspot.debugger.*;
+++import sun.jvm.hotspot.debugger.remote.*;
+++
+++public class RemoteRISCV64ThreadFactory implements RemoteThreadFactory {
+++  private RemoteDebuggerClient debugger;
+++
+++  public RemoteRISCV64ThreadFactory(RemoteDebuggerClient debugger) {
+++    this.debugger = debugger;
+++  }
+++
+++  public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) {
+++    return new RemoteRISCV64Thread(debugger, threadIdentifierAddr);
+++  }
+++
+++  public ThreadProxy createThreadWrapper(long id) {
+++    return new RemoteRISCV64Thread(debugger, id);
+++  }
+++}
++--- /dev/null
+++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/riscv64/RISCV64ThreadContext.java
++@@ -0,0 +1,172 @@
+++/*
+++ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+++ * Copyright (c) 2015, Red Hat Inc.
+++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved.
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+++ *
+++ * This code is free software; you can redistribute it and/or modify it
+++ * under the terms of the GNU General Public License version 2 only, as
+++ * published by the Free Software Foundation.
+++ *
+++ * This code is distributed in the hope that it will be useful, but WITHOUT
+++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+++ * version 2 for more details (a copy is included in the LICENSE file that
+++ * accompanied this code).
+++ *
+++ * You should have received a copy of the GNU General Public License version
+++ * 2 along with this work; if not, write to the Free Software Foundation,
+++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+++ *
+++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+++ * or visit www.oracle.com if you need additional information or have any
+++ * questions.
+++ *
+++ */
+++
+++package sun.jvm.hotspot.debugger.riscv64;
+++
+++import java.lang.annotation.Native;
+++
+++import sun.jvm.hotspot.debugger.*;
+++import sun.jvm.hotspot.debugger.cdbg.*;
+++
+++/** Specifies the thread context on riscv64 platforms; only a sub-portion
+++ * of the context is guaranteed to be present on all operating
+++ * systems. */
+++
+++public abstract class RISCV64ThreadContext implements ThreadContext {
+++  // Taken from /usr/include/asm/sigcontext.h on Linux/RISCV64.
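+++  // Note (illustrative, derived from the constants below): the register
+++  // indices follow the member order of user_regs_struct, so regs[0] is
+++  // pc, regs[1] is ra (the link register) and regs[2] is sp; hence the
+++  // PC/LR/SP aliases defined after the R0..R31 constants.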
+++ +++ // /* +++ // * Signal context structure - contains all info to do with the state +++ // * before the signal handler was invoked. +++ // */ +++ // struct sigcontext { +++ // struct user_regs_struct sc_regs; +++ // union __riscv_fp_state sc_fpregs; +++ // }; +++ // +++ // struct user_regs_struct { +++ // unsigned long pc; +++ // unsigned long ra; +++ // unsigned long sp; +++ // unsigned long gp; +++ // unsigned long tp; +++ // unsigned long t0; +++ // unsigned long t1; +++ // unsigned long t2; +++ // unsigned long s0; +++ // unsigned long s1; +++ // unsigned long a0; +++ // unsigned long a1; +++ // unsigned long a2; +++ // unsigned long a3; +++ // unsigned long a4; +++ // unsigned long a5; +++ // unsigned long a6; +++ // unsigned long a7; +++ // unsigned long s2; +++ // unsigned long s3; +++ // unsigned long s4; +++ // unsigned long s5; +++ // unsigned long s6; +++ // unsigned long s7; +++ // unsigned long s8; +++ // unsigned long s9; +++ // unsigned long s10; +++ // unsigned long s11; +++ // unsigned long t3; +++ // unsigned long t4; +++ // unsigned long t5; +++ // unsigned long t6; +++ // }; +++ +++ // NOTE: the indices for the various registers must be maintained as +++ // listed across various operating systems. However, only a small +++ // subset of the registers' values are guaranteed to be present (and +++ // must be present for the SA's stack walking to work) +++ +++ // One instance of the Native annotation is enough to trigger header generation +++ // for this file. +++ @Native +++ public static final int R0 = 0; +++ public static final int R1 = 1; +++ public static final int R2 = 2; +++ public static final int R3 = 3; +++ public static final int R4 = 4; +++ public static final int R5 = 5; +++ public static final int R6 = 6; +++ public static final int R7 = 7; +++ public static final int R8 = 8; +++ public static final int R9 = 9; +++ public static final int R10 = 10; +++ public static final int R11 = 11; +++ public static final int R12 = 12; +++ public static final int R13 = 13; +++ public static final int R14 = 14; +++ public static final int R15 = 15; +++ public static final int R16 = 16; +++ public static final int R17 = 17; +++ public static final int R18 = 18; +++ public static final int R19 = 19; +++ public static final int R20 = 20; +++ public static final int R21 = 21; +++ public static final int R22 = 22; +++ public static final int R23 = 23; +++ public static final int R24 = 24; +++ public static final int R25 = 25; +++ public static final int R26 = 26; +++ public static final int R27 = 27; +++ public static final int R28 = 28; +++ public static final int R29 = 29; +++ public static final int R30 = 30; +++ public static final int R31 = 31; +++ +++ public static final int NPRGREG = 32; +++ +++ public static final int PC = R0; +++ public static final int LR = R1; +++ public static final int SP = R2; +++ public static final int FP = R8; +++ +++ private long[] data; +++ +++ public RISCV64ThreadContext() { +++ data = new long[NPRGREG]; +++ } +++ +++ public int getNumRegisters() { +++ return NPRGREG; +++ } +++ +++ public String getRegisterName(int index) { +++ switch (index) { +++ case LR: return "lr"; +++ case SP: return "sp"; +++ case PC: return "pc"; +++ default: +++ return "r" + index; +++ } +++ } +++ +++ public void setRegister(int index, long value) { +++ data[index] = value; +++ } +++ +++ public long getRegister(int index) { +++ return data[index]; +++ } +++ +++ public CFrame getTopFrame(Debugger dbg) { +++ return null; +++ } +++ +++ /** This can't be implemented 
in this class since we would have to +++ * tie the implementation to, for example, the debugging system */ +++ public abstract void setRegisterAsAddress(int index, Address value); +++ +++ /** This can't be implemented in this class since we would have to +++ * tie the implementation to, for example, the debugging system */ +++ public abstract Address getRegisterAsAddress(int index); +++} ++--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -34,6 +34,7 @@ import sun.jvm.hotspot.runtime.win32_aar ++ import sun.jvm.hotspot.runtime.linux_x86.LinuxX86JavaThreadPDAccess; ++ import sun.jvm.hotspot.runtime.linux_amd64.LinuxAMD64JavaThreadPDAccess; ++ import sun.jvm.hotspot.runtime.linux_aarch64.LinuxAARCH64JavaThreadPDAccess; +++import sun.jvm.hotspot.runtime.linux_riscv64.LinuxRISCV64JavaThreadPDAccess; ++ import sun.jvm.hotspot.runtime.linux_ppc64.LinuxPPC64JavaThreadPDAccess; ++ import sun.jvm.hotspot.runtime.bsd_x86.BsdX86JavaThreadPDAccess; ++ import sun.jvm.hotspot.runtime.bsd_amd64.BsdAMD64JavaThreadPDAccess; ++@@ -113,6 +114,8 @@ public class Threads { ++ access = new LinuxPPC64JavaThreadPDAccess(); ++ } else if (cpu.equals("aarch64")) { ++ access = new LinuxAARCH64JavaThreadPDAccess(); +++ } else if (cpu.equals("riscv64")) { +++ access = new LinuxRISCV64JavaThreadPDAccess(); ++ } else { ++ try { ++ access = (JavaThreadPDAccess) ++--- /dev/null +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/linux_riscv64/LinuxRISCV64JavaThreadPDAccess.java ++@@ -0,0 +1,134 @@ +++/* +++ * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, Red Hat Inc. +++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. 
+++ * +++ */ +++ +++package sun.jvm.hotspot.runtime.linux_riscv64; +++ +++import java.io.*; +++import java.util.*; +++import sun.jvm.hotspot.debugger.*; +++import sun.jvm.hotspot.debugger.riscv64.*; +++import sun.jvm.hotspot.runtime.*; +++import sun.jvm.hotspot.runtime.riscv64.*; +++import sun.jvm.hotspot.types.*; +++import sun.jvm.hotspot.utilities.*; +++import sun.jvm.hotspot.utilities.Observable; +++import sun.jvm.hotspot.utilities.Observer; +++ +++public class LinuxRISCV64JavaThreadPDAccess implements JavaThreadPDAccess { +++ private static AddressField lastJavaFPField; +++ private static AddressField osThreadField; +++ +++ // Field from OSThread +++ private static CIntegerField osThreadThreadIDField; +++ +++ // This is currently unneeded but is being kept in case we change +++ // the currentFrameGuess algorithm +++ private static final long GUESS_SCAN_RANGE = 128 * 1024; +++ +++ static { +++ VM.registerVMInitializedObserver(new Observer() { +++ public void update(Observable o, Object data) { +++ initialize(VM.getVM().getTypeDataBase()); +++ } +++ }); +++ } +++ +++ private static synchronized void initialize(TypeDataBase db) { +++ Type type = db.lookupType("JavaThread"); +++ osThreadField = type.getAddressField("_osthread"); +++ +++ Type anchorType = db.lookupType("JavaFrameAnchor"); +++ lastJavaFPField = anchorType.getAddressField("_last_Java_fp"); +++ +++ Type osThreadType = db.lookupType("OSThread"); +++ osThreadThreadIDField = osThreadType.getCIntegerField("_thread_id"); +++ } +++ +++ public Address getLastJavaFP(Address addr) { +++ return lastJavaFPField.getValue(addr.addOffsetTo(sun.jvm.hotspot.runtime.JavaThread.getAnchorField().getOffset())); +++ } +++ +++ public Address getLastJavaPC(Address addr) { +++ return null; +++ } +++ +++ public Address getBaseOfStackPointer(Address addr) { +++ return null; +++ } +++ +++ public Frame getLastFramePD(JavaThread thread, Address addr) { +++ Address fp = thread.getLastJavaFP(); +++ if (fp == null) { +++ return null; // no information +++ } +++ return new RISCV64Frame(thread.getLastJavaSP(), fp); +++ } +++ +++ public RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) { +++ return new RISCV64RegisterMap(thread, updateMap); +++ } +++ +++ public Frame getCurrentFrameGuess(JavaThread thread, Address addr) { +++ ThreadProxy t = getThreadProxy(addr); +++ RISCV64ThreadContext context = (RISCV64ThreadContext) t.getContext(); +++ RISCV64CurrentFrameGuess guesser = new RISCV64CurrentFrameGuess(context, thread); +++ if (!guesser.run(GUESS_SCAN_RANGE)) { +++ return null; +++ } +++ if (guesser.getPC() == null) { +++ return new RISCV64Frame(guesser.getSP(), guesser.getFP()); +++ } else { +++ return new RISCV64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC()); +++ } +++ } +++ +++ public void printThreadIDOn(Address addr, PrintStream tty) { +++ tty.print(getThreadProxy(addr)); +++ } +++ +++ public void printInfoOn(Address threadAddr, PrintStream tty) { +++ tty.print("Thread id: "); +++ printThreadIDOn(threadAddr, tty); +++ } +++ +++ public Address getLastSP(Address addr) { +++ ThreadProxy t = getThreadProxy(addr); +++ RISCV64ThreadContext context = (RISCV64ThreadContext) t.getContext(); +++ return context.getRegisterAsAddress(RISCV64ThreadContext.SP); +++ } +++ +++ public ThreadProxy getThreadProxy(Address addr) { +++ // Addr is the address of the JavaThread. 
+++ // Fetch the OSThread (for now and for simplicity, not making a +++ // separate "OSThread" class in this package) +++ Address osThreadAddr = osThreadField.getValue(addr); +++ // Get the address of the _thread_id from the OSThread +++ Address threadIdAddr = osThreadAddr.addOffsetTo(osThreadThreadIDField.getOffset()); +++ +++ JVMDebugger debugger = VM.getVM().getDebugger(); +++ return debugger.getThreadForIdentifierAddress(threadIdAddr); +++ } +++} ++--- /dev/null +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/riscv64/RISCV64CurrentFrameGuess.java ++@@ -0,0 +1,223 @@ +++/* +++ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, 2019, Red Hat Inc. +++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++package sun.jvm.hotspot.runtime.riscv64; +++ +++import sun.jvm.hotspot.debugger.*; +++import sun.jvm.hotspot.debugger.riscv64.*; +++import sun.jvm.hotspot.code.*; +++import sun.jvm.hotspot.interpreter.*; +++import sun.jvm.hotspot.runtime.*; +++import sun.jvm.hotspot.runtime.riscv64.*; +++ +++/**

+++   Should be able to be used on all riscv64 platforms we support
+++   (Linux/riscv64) to implement JavaThread's "currentFrameGuess()"
+++   functionality. Input is an RISCV64ThreadContext; output is SP, FP,
+++   and PC for an RISCV64Frame. Instantiation of the RISCV64Frame is
+++   left to the caller, since we may need to subclass RISCV64Frame to
+++   support signal handler frames on Unix platforms.
+++
+++   Algorithm is to walk up the stack within a given range (say,
+++   512K at most) looking for a plausible PC and SP for a Java frame,
+++   also considering those coming in from the context. If we find a PC
+++   that belongs to the VM (i.e., in generated code like the
+++   interpreter or CodeCache) then we try to find an associated FP.
+++   We repeat this until we either find a complete frame or run out of
+++   stack to look at.
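+++
+++   As an illustrative sketch only (mirroring what
+++   LinuxRISCV64JavaThreadPDAccess.getCurrentFrameGuess does with its
+++   128K scan range), a caller might use this class as follows:
+++
+++   <pre>
+++   RISCV64CurrentFrameGuess guesser = new RISCV64CurrentFrameGuess(context, thread);
+++   if (guesser.run(128 * 1024)) {
+++     Frame top = (guesser.getPC() == null)
+++       ? new RISCV64Frame(guesser.getSP(), guesser.getFP())
+++       : new RISCV64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
+++   }
+++   </pre>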
*/ +++ +++public class RISCV64CurrentFrameGuess { +++ private RISCV64ThreadContext context; +++ private JavaThread thread; +++ private Address spFound; +++ private Address fpFound; +++ private Address pcFound; +++ +++ private static final boolean DEBUG = System.getProperty("sun.jvm.hotspot.runtime.riscv64.RISCV64Frame.DEBUG") +++ != null; +++ +++ public RISCV64CurrentFrameGuess(RISCV64ThreadContext context, +++ JavaThread thread) { +++ this.context = context; +++ this.thread = thread; +++ } +++ +++ /** Returns false if not able to find a frame within a reasonable range. */ +++ public boolean run(long regionInBytesToSearch) { +++ Address sp = context.getRegisterAsAddress(RISCV64ThreadContext.SP); +++ Address pc = context.getRegisterAsAddress(RISCV64ThreadContext.PC); +++ Address fp = context.getRegisterAsAddress(RISCV64ThreadContext.FP); +++ if (sp == null) { +++ // Bail out if no last java frame either +++ if (thread.getLastJavaSP() != null) { +++ setValues(thread.getLastJavaSP(), thread.getLastJavaFP(), null); +++ return true; +++ } +++ return false; +++ } +++ Address end = sp.addOffsetTo(regionInBytesToSearch); +++ VM vm = VM.getVM(); +++ +++ setValues(null, null, null); // Assume we're not going to find anything +++ +++ if (vm.isJavaPCDbg(pc)) { +++ if (vm.isClientCompiler()) { +++ // If the topmost frame is a Java frame, we are (pretty much) +++ // guaranteed to have a viable FP. We should be more robust +++ // than this (we have the potential for losing entire threads' +++ // stack traces) but need to see how much work we really have +++ // to do here. Searching the stack for an (SP, FP) pair is +++ // hard since it's easy to misinterpret inter-frame stack +++ // pointers as base-of-frame pointers; we also don't know the +++ // sizes of C1 frames (not registered in the nmethod) so can't +++ // derive them from SP. +++ +++ setValues(sp, fp, pc); +++ return true; +++ } else { +++ if (vm.getInterpreter().contains(pc)) { +++ if (DEBUG) { +++ System.out.println("CurrentFrameGuess: choosing interpreter frame: sp = " + +++ sp + ", fp = " + fp + ", pc = " + pc); +++ } +++ setValues(sp, fp, pc); +++ return true; +++ } +++ +++ // For the server compiler, FP is not guaranteed to be valid +++ // for compiled code. In addition, an earlier attempt at a +++ // non-searching algorithm (see below) failed because the +++ // stack pointer from the thread context was pointing +++ // (considerably) beyond the ostensible end of the stack, into +++ // garbage; walking from the topmost frame back caused a crash. +++ // +++ // This algorithm takes the current PC as a given and tries to +++ // find the correct corresponding SP by walking up the stack +++ // and repeatedly performing stackwalks (very inefficient). +++ // +++ // FIXME: there is something wrong with stackwalking across +++ // adapter frames...this is likely to be the root cause of the +++ // failure with the simpler algorithm below. +++ +++ for (long offset = 0; +++ offset < regionInBytesToSearch; +++ offset += vm.getAddressSize()) { +++ try { +++ Address curSP = sp.addOffsetTo(offset); +++ Frame frame = new RISCV64Frame(curSP, null, pc); +++ RegisterMap map = thread.newRegisterMap(false); +++ while (frame != null) { +++ if (frame.isEntryFrame() && frame.entryFrameIsFirst()) { +++ // We were able to traverse all the way to the +++ // bottommost Java frame. +++ // This sp looks good. Keep it. 
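+++                // (Illustrative note: fp is intentionally left null in the
+++                // setValues call below; callers such as getCurrentFrameGuess
+++                // then construct RISCV64Frame(sp, fp, pc) with a null fp.)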
+++ if (DEBUG) { +++ System.out.println("CurrentFrameGuess: Choosing sp = " + curSP + ", pc = " + pc); +++ } +++ setValues(curSP, null, pc); +++ return true; +++ } +++ frame = frame.sender(map); +++ } +++ } catch (Exception e) { +++ if (DEBUG) { +++ System.out.println("CurrentFrameGuess: Exception " + e + " at offset " + offset); +++ } +++ // Bad SP. Try another. +++ } +++ } +++ +++ // Were not able to find a plausible SP to go with this PC. +++ // Bail out. +++ return false; +++ } +++ } else { +++ // If the current program counter was not known to us as a Java +++ // PC, we currently assume that we are in the run-time system +++ // and attempt to look to thread-local storage for saved SP and +++ // FP. Note that if these are null (because we were, in fact, +++ // in Java code, i.e., vtable stubs or similar, and the SA +++ // didn't have enough insight into the target VM to understand +++ // that) then we are going to lose the entire stack trace for +++ // the thread, which is sub-optimal. FIXME. +++ +++ if (DEBUG) { +++ System.out.println("CurrentFrameGuess: choosing last Java frame: sp = " + +++ thread.getLastJavaSP() + ", fp = " + thread.getLastJavaFP()); +++ } +++ if (thread.getLastJavaSP() == null) { +++ return false; // No known Java frames on stack +++ } +++ +++ // The runtime has a nasty habit of not saving fp in the frame +++ // anchor, leaving us to grovel about in the stack to find a +++ // plausible address. Fortunately, this only happens in +++ // compiled code; there we always have a valid PC, and we always +++ // push LR and FP onto the stack as a pair, with FP at the lower +++ // address. +++ pc = thread.getLastJavaPC(); +++ fp = thread.getLastJavaFP(); +++ sp = thread.getLastJavaSP(); +++ +++ if (fp == null) { +++ CodeCache cc = vm.getCodeCache(); +++ if (cc.contains(pc)) { +++ CodeBlob cb = cc.findBlob(pc); +++ if (DEBUG) { +++ System.out.println("FP is null. Found blob frame size " + cb.getFrameSize()); +++ } +++ // See if we can derive a frame pointer from SP and PC +++ long link_offset = cb.getFrameSize() - 2 * VM.getVM().getAddressSize(); +++ if (link_offset >= 0) { +++ fp = sp.addOffsetTo(link_offset); +++ } +++ } +++ } +++ +++ // We found a PC in the frame anchor. Check that it's plausible, and +++ // if it is, use it. +++ if (vm.isJavaPCDbg(pc)) { +++ setValues(sp, fp, pc); +++ } else { +++ setValues(sp, fp, null); +++ } +++ +++ return true; +++ } +++ } +++ +++ public Address getSP() { return spFound; } +++ public Address getFP() { return fpFound; } +++ /** May be null if getting values from thread-local storage; take +++ care to call the correct RISCV64Frame constructor to recover this if +++ necessary */ +++ public Address getPC() { return pcFound; } +++ +++ private void setValues(Address sp, Address fp, Address pc) { +++ spFound = sp; +++ fpFound = fp; +++ pcFound = pc; +++ } +++} ++--- /dev/null +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/riscv64/RISCV64Frame.java ++@@ -0,0 +1,556 @@ +++/* +++ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, 2019, Red Hat Inc. +++ * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. 
+++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++package sun.jvm.hotspot.runtime.riscv64; +++ +++import java.util.*; +++import sun.jvm.hotspot.code.*; +++import sun.jvm.hotspot.compiler.*; +++import sun.jvm.hotspot.debugger.*; +++import sun.jvm.hotspot.oops.*; +++import sun.jvm.hotspot.runtime.*; +++import sun.jvm.hotspot.types.*; +++import sun.jvm.hotspot.utilities.*; +++import sun.jvm.hotspot.utilities.Observable; +++import sun.jvm.hotspot.utilities.Observer; +++ +++/** Specialization of and implementation of abstract methods of the +++ Frame class for the riscv64 family of CPUs. */ +++ +++public class RISCV64Frame extends Frame { +++ private static final boolean DEBUG; +++ static { +++ DEBUG = System.getProperty("sun.jvm.hotspot.runtime.RISCV64.RISCV64Frame.DEBUG") != null; +++ } +++ +++ // Java frames +++ private static final int LINK_OFFSET = -2; +++ private static final int RETURN_ADDR_OFFSET = -1; +++ private static final int SENDER_SP_OFFSET = 0; +++ +++ // Interpreter frames +++ private static final int INTERPRETER_FRAME_SENDER_SP_OFFSET = -3; +++ private static final int INTERPRETER_FRAME_LAST_SP_OFFSET = INTERPRETER_FRAME_SENDER_SP_OFFSET - 1; +++ private static final int INTERPRETER_FRAME_METHOD_OFFSET = INTERPRETER_FRAME_LAST_SP_OFFSET - 1; +++ private static int INTERPRETER_FRAME_MDX_OFFSET; // Non-core builds only +++ private static int INTERPRETER_FRAME_PADDING_OFFSET; +++ private static int INTERPRETER_FRAME_MIRROR_OFFSET; +++ private static int INTERPRETER_FRAME_CACHE_OFFSET; +++ private static int INTERPRETER_FRAME_LOCALS_OFFSET; +++ private static int INTERPRETER_FRAME_BCX_OFFSET; +++ private static int INTERPRETER_FRAME_INITIAL_SP_OFFSET; +++ private static int INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET; +++ private static int INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET; +++ +++ // Entry frames +++ private static int ENTRY_FRAME_CALL_WRAPPER_OFFSET = -10; +++ +++ // Native frames +++ private static final int NATIVE_FRAME_INITIAL_PARAM_OFFSET = 2; +++ +++ private static VMReg fp = new VMReg(8); +++ +++ static { +++ VM.registerVMInitializedObserver(new Observer() { +++ public void update(Observable o, Object data) { +++ initialize(VM.getVM().getTypeDataBase()); +++ } +++ }); +++ } +++ +++ private static synchronized void initialize(TypeDataBase db) { +++ INTERPRETER_FRAME_MDX_OFFSET = INTERPRETER_FRAME_METHOD_OFFSET - 1; +++ INTERPRETER_FRAME_PADDING_OFFSET = INTERPRETER_FRAME_MDX_OFFSET - 1; +++ INTERPRETER_FRAME_MIRROR_OFFSET = INTERPRETER_FRAME_PADDING_OFFSET - 1; +++ INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_MIRROR_OFFSET - 1; +++ INTERPRETER_FRAME_LOCALS_OFFSET = INTERPRETER_FRAME_CACHE_OFFSET - 1; +++ INTERPRETER_FRAME_BCX_OFFSET = INTERPRETER_FRAME_LOCALS_OFFSET - 1; +++ INTERPRETER_FRAME_INITIAL_SP_OFFSET = INTERPRETER_FRAME_BCX_OFFSET - 1; 
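+++    // Worked out (illustrative), in fp-relative words: mdx = -6,
+++    // padding = -7, mirror = -8, cache = -9, locals = -10, bcx = -11,
+++    // initial sp = -12; the monitor block offsets below coincide with
+++    // initial sp.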
+++ INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET; +++ INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET; +++ } +++ +++ +++ // an additional field beyond sp and pc: +++ Address raw_fp; // frame pointer +++ private Address raw_unextendedSP; +++ +++ private RISCV64Frame() { +++ } +++ +++ private void adjustForDeopt() { +++ if ( pc != null) { +++ // Look for a deopt pc and if it is deopted convert to original pc +++ CodeBlob cb = VM.getVM().getCodeCache().findBlob(pc); +++ if (cb != null && cb.isJavaMethod()) { +++ NMethod nm = (NMethod) cb; +++ if (pc.equals(nm.deoptHandlerBegin())) { +++ if (Assert.ASSERTS_ENABLED) { +++ Assert.that(this.getUnextendedSP() != null, "null SP in Java frame"); +++ } +++ // adjust pc if frame is deoptimized. +++ pc = this.getUnextendedSP().getAddressAt(nm.origPCOffset()); +++ deoptimized = true; +++ } +++ } +++ } +++ } +++ +++ public RISCV64Frame(Address raw_sp, Address raw_fp, Address pc) { +++ this.raw_sp = raw_sp; +++ this.raw_unextendedSP = raw_sp; +++ this.raw_fp = raw_fp; +++ this.pc = pc; +++ adjustUnextendedSP(); +++ +++ // Frame must be fully constructed before this call +++ adjustForDeopt(); +++ +++ if (DEBUG) { +++ System.out.println("RISCV64Frame(sp, fp, pc): " + this); +++ dumpStack(); +++ } +++ } +++ +++ public RISCV64Frame(Address raw_sp, Address raw_fp) { +++ this.raw_sp = raw_sp; +++ this.raw_unextendedSP = raw_sp; +++ this.raw_fp = raw_fp; +++ +++ // We cannot assume SP[-1] always contains a valid return PC (e.g. if +++ // the callee is a C/C++ compiled frame). If the PC is not known to +++ // Java then this.pc is null. +++ Address savedPC = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize()); +++ if (VM.getVM().isJavaPCDbg(savedPC)) { +++ this.pc = savedPC; +++ } +++ +++ adjustUnextendedSP(); +++ +++ // Frame must be fully constructed before this call +++ adjustForDeopt(); +++ +++ if (DEBUG) { +++ System.out.println("RISCV64Frame(sp, fp): " + this); +++ dumpStack(); +++ } +++ } +++ +++ public RISCV64Frame(Address raw_sp, Address raw_unextendedSp, Address raw_fp, Address pc) { +++ this.raw_sp = raw_sp; +++ this.raw_unextendedSP = raw_unextendedSp; +++ this.raw_fp = raw_fp; +++ this.pc = pc; +++ adjustUnextendedSP(); +++ +++ // Frame must be fully constructed before this call +++ adjustForDeopt(); +++ +++ if (DEBUG) { +++ System.out.println("RISCV64Frame(sp, unextendedSP, fp, pc): " + this); +++ dumpStack(); +++ } +++ +++ } +++ +++ public Object clone() { +++ RISCV64Frame frame = new RISCV64Frame(); +++ frame.raw_sp = raw_sp; +++ frame.raw_unextendedSP = raw_unextendedSP; +++ frame.raw_fp = raw_fp; +++ frame.pc = pc; +++ frame.deoptimized = deoptimized; +++ return frame; +++ } +++ +++ public boolean equals(Object arg) { +++ if (arg == null) { +++ return false; +++ } +++ +++ if (!(arg instanceof RISCV64Frame)) { +++ return false; +++ } +++ +++ RISCV64Frame other = (RISCV64Frame) arg; +++ +++ return (AddressOps.equal(getSP(), other.getSP()) && +++ AddressOps.equal(getUnextendedSP(), other.getUnextendedSP()) && +++ AddressOps.equal(getFP(), other.getFP()) && +++ AddressOps.equal(getPC(), other.getPC())); +++ } +++ +++ public int hashCode() { +++ if (raw_sp == null) { +++ return 0; +++ } +++ +++ return raw_sp.hashCode(); +++ } +++ +++ public String toString() { +++ return "sp: " + (getSP() == null? "null" : getSP().toString()) + +++ ", unextendedSP: " + (getUnextendedSP() == null? "null" : getUnextendedSP().toString()) + +++ ", fp: " + (getFP() == null? 
"null" : getFP().toString()) + +++ ", pc: " + (pc == null? "null" : pc.toString()); +++ } +++ +++ // accessors for the instance variables +++ public Address getFP() { return raw_fp; } +++ public Address getSP() { return raw_sp; } +++ public Address getID() { return raw_sp; } +++ +++ // FIXME: not implemented yet +++ public boolean isSignalHandlerFrameDbg() { return false; } +++ public int getSignalNumberDbg() { return 0; } +++ public String getSignalNameDbg() { return null; } +++ +++ public boolean isInterpretedFrameValid() { +++ if (Assert.ASSERTS_ENABLED) { +++ Assert.that(isInterpretedFrame(), "Not an interpreted frame"); +++ } +++ +++ // These are reasonable sanity checks +++ if (getFP() == null || getFP().andWithMask(0x3) != null) { +++ return false; +++ } +++ +++ if (getSP() == null || getSP().andWithMask(0x3) != null) { +++ return false; +++ } +++ +++ if (getFP().addOffsetTo(INTERPRETER_FRAME_INITIAL_SP_OFFSET * VM.getVM().getAddressSize()).lessThan(getSP())) { +++ return false; +++ } +++ +++ // These are hacks to keep us out of trouble. +++ // The problem with these is that they mask other problems +++ if (getFP().lessThanOrEqual(getSP())) { +++ // this attempts to deal with unsigned comparison above +++ return false; +++ } +++ +++ if (getFP().minus(getSP()) > 4096 * VM.getVM().getAddressSize()) { +++ // stack frames shouldn't be large. +++ return false; +++ } +++ +++ return true; +++ } +++ +++ public Frame sender(RegisterMap regMap, CodeBlob cb) { +++ RISCV64RegisterMap map = (RISCV64RegisterMap) regMap; +++ +++ if (Assert.ASSERTS_ENABLED) { +++ Assert.that(map != null, "map must be set"); +++ } +++ +++ // Default is we done have to follow them. The sender_for_xxx will +++ // update it accordingly +++ map.setIncludeArgumentOops(false); +++ +++ if (isEntryFrame()) return senderForEntryFrame(map); +++ if (isInterpretedFrame()) return senderForInterpreterFrame(map); +++ +++ if(cb == null) { +++ cb = VM.getVM().getCodeCache().findBlob(getPC()); +++ } else { +++ if (Assert.ASSERTS_ENABLED) { +++ Assert.that(cb.equals(VM.getVM().getCodeCache().findBlob(getPC())), "Must be the same"); +++ } +++ } +++ +++ if (cb != null) { +++ return senderForCompiledFrame(map, cb); +++ } +++ +++ // Must be native-compiled frame, i.e. the marshaling code for native +++ // methods that exists in the core system. 
+++ return new RISCV64Frame(getSenderSP(), getLink(), getSenderPC()); +++ } +++ +++ private Frame senderForEntryFrame(RISCV64RegisterMap map) { +++ if (DEBUG) { +++ System.out.println("senderForEntryFrame"); +++ } +++ if (Assert.ASSERTS_ENABLED) { +++ Assert.that(map != null, "map must be set"); +++ } +++ // Java frame called from C; skip all C frames and return top C +++ // frame of that chunk as the sender +++ RISCV64JavaCallWrapper jcw = (RISCV64JavaCallWrapper) getEntryFrameCallWrapper(); +++ if (Assert.ASSERTS_ENABLED) { +++ Assert.that(!entryFrameIsFirst(), "next Java fp must be non zero"); +++ Assert.that(jcw.getLastJavaSP().greaterThan(getSP()), "must be above this frame on stack"); +++ } +++ RISCV64Frame fr; +++ if (jcw.getLastJavaPC() != null) { +++ fr = new RISCV64Frame(jcw.getLastJavaSP(), jcw.getLastJavaFP(), jcw.getLastJavaPC()); +++ } else { +++ fr = new RISCV64Frame(jcw.getLastJavaSP(), jcw.getLastJavaFP()); +++ } +++ map.clear(); +++ if (Assert.ASSERTS_ENABLED) { +++ Assert.that(map.getIncludeArgumentOops(), "should be set by clear"); +++ } +++ return fr; +++ } +++ +++ //------------------------------------------------------------------------------ +++ // frame::adjust_unextended_sp +++ private void adjustUnextendedSP() { +++ // If we are returning to a compiled MethodHandle call site, the +++ // saved_fp will in fact be a saved value of the unextended SP. The +++ // simplest way to tell whether we are returning to such a call site +++ // is as follows: +++ +++ CodeBlob cb = cb(); +++ NMethod senderNm = (cb == null) ? null : cb.asNMethodOrNull(); +++ if (senderNm != null) { +++ // If the sender PC is a deoptimization point, get the original +++ // PC. For MethodHandle call site the unextended_sp is stored in +++ // saved_fp. +++ if (senderNm.isDeoptMhEntry(getPC())) { +++ raw_unextendedSP = getFP(); +++ } +++ else if (senderNm.isDeoptEntry(getPC())) { +++ } +++ else if (senderNm.isMethodHandleReturn(getPC())) { +++ raw_unextendedSP = getFP(); +++ } +++ } +++ } +++ +++ private Frame senderForInterpreterFrame(RISCV64RegisterMap map) { +++ if (DEBUG) { +++ System.out.println("senderForInterpreterFrame"); +++ } +++ Address unextendedSP = addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0); +++ Address sp = addressOfStackSlot(SENDER_SP_OFFSET); +++ // We do not need to update the callee-save register mapping because above +++ // us is either another interpreter frame or a converter-frame, but never +++ // directly a compiled frame. +++ // 11/24/04 SFG. With the removal of adapter frames this is no longer true. +++ // However c2 no longer uses callee save register for java calls so there +++ // are no callee register to find. 
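+++    // (Illustrative note: updateMapWithSavedLink records where this frame
+++    // saved the caller's fp, so the register map can later recover values
+++    // that live in fp.)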
+++ +++ if (map.getUpdateMap()) +++ updateMapWithSavedLink(map, addressOfStackSlot(LINK_OFFSET)); +++ +++ return new RISCV64Frame(sp, unextendedSP, getLink(), getSenderPC()); +++ } +++ +++ private void updateMapWithSavedLink(RegisterMap map, Address savedFPAddr) { +++ map.setLocation(fp, savedFPAddr); +++ } +++ +++ private Frame senderForCompiledFrame(RISCV64RegisterMap map, CodeBlob cb) { +++ if (DEBUG) { +++ System.out.println("senderForCompiledFrame"); +++ } +++ +++ // +++ // NOTE: some of this code is (unfortunately) duplicated RISCV64CurrentFrameGuess +++ // +++ +++ if (Assert.ASSERTS_ENABLED) { +++ Assert.that(map != null, "map must be set"); +++ } +++ +++ // frame owned by optimizing compiler +++ if (Assert.ASSERTS_ENABLED) { +++ Assert.that(cb.getFrameSize() >= 0, "must have non-zero frame size"); +++ } +++ Address senderSP = getUnextendedSP().addOffsetTo(cb.getFrameSize()); +++ +++ // The return_address is always the word on the stack +++ Address senderPC = senderSP.getAddressAt(-1 * VM.getVM().getAddressSize()); +++ +++ // This is the saved value of FP which may or may not really be an FP. +++ // It is only an FP if the sender is an interpreter frame. +++ Address savedFPAddr = senderSP.addOffsetTo(-2 * VM.getVM().getAddressSize()); +++ +++ if (map.getUpdateMap()) { +++ // Tell GC to use argument oopmaps for some runtime stubs that need it. +++ // For C1, the runtime stub might not have oop maps, so set this flag +++ // outside of update_register_map. +++ map.setIncludeArgumentOops(cb.callerMustGCArguments()); +++ +++ if (cb.getOopMaps() != null) { +++ ImmutableOopMapSet.updateRegisterMap(this, cb, map, true); +++ } +++ +++ // Since the prolog does the save and restore of FP there is no oopmap +++ // for it so we must fill in its location as if there was an oopmap entry +++ // since if our caller was compiled code there could be live jvm state in it. +++ updateMapWithSavedLink(map, savedFPAddr); +++ } +++ +++ return new RISCV64Frame(senderSP, savedFPAddr.getAddressAt(0), senderPC); +++ } +++ +++ protected boolean hasSenderPD() { +++ return true; +++ } +++ +++ public long frameSize() { +++ return (getSenderSP().minus(getSP()) / VM.getVM().getAddressSize()); +++ } +++ +++ public Address getLink() { +++ try { +++ if (DEBUG) { +++ System.out.println("Reading link at " + addressOfStackSlot(LINK_OFFSET) +++ + " = " + addressOfStackSlot(LINK_OFFSET).getAddressAt(0)); +++ } +++ return addressOfStackSlot(LINK_OFFSET).getAddressAt(0); +++ } catch (Exception e) { +++ if (DEBUG) +++ System.out.println("Returning null"); +++ return null; +++ } +++ } +++ +++ public Address getUnextendedSP() { return raw_unextendedSP; } +++ +++ // Return address: +++ public Address getSenderPCAddr() { return addressOfStackSlot(RETURN_ADDR_OFFSET); } +++ public Address getSenderPC() { return getSenderPCAddr().getAddressAt(0); } +++ +++ // return address of param, zero origin index. +++ public Address getNativeParamAddr(int idx) { +++ return addressOfStackSlot(NATIVE_FRAME_INITIAL_PARAM_OFFSET + idx); +++ } +++ +++ public Address getSenderSP() { return addressOfStackSlot(SENDER_SP_OFFSET); } +++ +++ public Address addressOfInterpreterFrameLocals() { +++ return addressOfStackSlot(INTERPRETER_FRAME_LOCALS_OFFSET); +++ } +++ +++ private Address addressOfInterpreterFrameBCX() { +++ return addressOfStackSlot(INTERPRETER_FRAME_BCX_OFFSET); +++ } +++ +++ public int getInterpreterFrameBCI() { +++ // FIXME: this is not atomic with respect to GC and is unsuitable +++ // for use in a non-debugging, or reflective, system. 
Need to +++ // figure out how to express this. +++ Address bcp = addressOfInterpreterFrameBCX().getAddressAt(0); +++ Address methodHandle = addressOfInterpreterFrameMethod().getAddressAt(0); +++ Method method = (Method)Metadata.instantiateWrapperFor(methodHandle); +++ return bcpToBci(bcp, method); +++ } +++ +++ public Address addressOfInterpreterFrameMDX() { +++ return addressOfStackSlot(INTERPRETER_FRAME_MDX_OFFSET); +++ } +++ +++ // expression stack +++ // (the max_stack arguments are used by the GC; see class FrameClosure) +++ +++ public Address addressOfInterpreterFrameExpressionStack() { +++ Address monitorEnd = interpreterFrameMonitorEnd().address(); +++ return monitorEnd.addOffsetTo(-1 * VM.getVM().getAddressSize()); +++ } +++ +++ public int getInterpreterFrameExpressionStackDirection() { return -1; } +++ +++ // top of expression stack +++ public Address addressOfInterpreterFrameTOS() { +++ return getSP(); +++ } +++ +++ /** Expression stack from top down */ +++ public Address addressOfInterpreterFrameTOSAt(int slot) { +++ return addressOfInterpreterFrameTOS().addOffsetTo(slot * VM.getVM().getAddressSize()); +++ } +++ +++ public Address getInterpreterFrameSenderSP() { +++ if (Assert.ASSERTS_ENABLED) { +++ Assert.that(isInterpretedFrame(), "interpreted frame expected"); +++ } +++ return addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0); +++ } +++ +++ // Monitors +++ public BasicObjectLock interpreterFrameMonitorBegin() { +++ return new BasicObjectLock(addressOfStackSlot(INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET)); +++ } +++ +++ public BasicObjectLock interpreterFrameMonitorEnd() { +++ Address result = addressOfStackSlot(INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET).getAddressAt(0); +++ if (Assert.ASSERTS_ENABLED) { +++ // make sure the pointer points inside the frame +++ Assert.that(AddressOps.gt(getFP(), result), "result must < than frame pointer"); +++ Assert.that(AddressOps.lte(getSP(), result), "result must >= than stack pointer"); +++ } +++ return new BasicObjectLock(result); +++ } +++ +++ public int interpreterFrameMonitorSize() { +++ return BasicObjectLock.size(); +++ } +++ +++ // Method +++ public Address addressOfInterpreterFrameMethod() { +++ return addressOfStackSlot(INTERPRETER_FRAME_METHOD_OFFSET); +++ } +++ +++ // Constant pool cache +++ public Address addressOfInterpreterFrameCPCache() { +++ return addressOfStackSlot(INTERPRETER_FRAME_CACHE_OFFSET); +++ } +++ +++ // Entry frames +++ public JavaCallWrapper getEntryFrameCallWrapper() { +++ return new RISCV64JavaCallWrapper(addressOfStackSlot(ENTRY_FRAME_CALL_WRAPPER_OFFSET).getAddressAt(0)); +++ } +++ +++ protected Address addressOfSavedOopResult() { +++ // offset is 2 for compiler2 and 3 for compiler1 +++ return getSP().addOffsetTo((VM.getVM().isClientCompiler() ? 
2 : 3) * +++ VM.getVM().getAddressSize()); +++ } +++ +++ protected Address addressOfSavedReceiver() { +++ return getSP().addOffsetTo(-4 * VM.getVM().getAddressSize()); +++ } +++ +++ private void dumpStack() { +++ for (Address addr = getSP().addOffsetTo(-4 * VM.getVM().getAddressSize()); +++ AddressOps.lt(addr, getSP()); +++ addr = addr.addOffsetTo(VM.getVM().getAddressSize())) { +++ System.out.println(addr + ": " + addr.getAddressAt(0)); +++ } +++ System.out.println("-----------------------"); +++ for (Address addr = getSP(); +++ AddressOps.lte(addr, getSP().addOffsetTo(20 * VM.getVM().getAddressSize())); +++ addr = addr.addOffsetTo(VM.getVM().getAddressSize())) { +++ System.out.println(addr + ": " + addr.getAddressAt(0)); +++ } +++ } +++} ++--- /dev/null +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/riscv64/RISCV64JavaCallWrapper.java ++@@ -0,0 +1,61 @@ +++/* +++ * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, Red Hat Inc. +++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++package sun.jvm.hotspot.runtime.riscv64; +++ +++import java.util.*; +++import sun.jvm.hotspot.debugger.*; +++import sun.jvm.hotspot.types.*; +++import sun.jvm.hotspot.runtime.*; +++import sun.jvm.hotspot.utilities.*; +++import sun.jvm.hotspot.utilities.Observable; +++import sun.jvm.hotspot.utilities.Observer; +++ +++public class RISCV64JavaCallWrapper extends JavaCallWrapper { +++ private static AddressField lastJavaFPField; +++ +++ static { +++ VM.registerVMInitializedObserver(new Observer() { +++ public void update(Observable o, Object data) { +++ initialize(VM.getVM().getTypeDataBase()); +++ } +++ }); +++ } +++ +++ private static synchronized void initialize(TypeDataBase db) { +++ Type type = db.lookupType("JavaFrameAnchor"); +++ +++ lastJavaFPField = type.getAddressField("_last_Java_fp"); +++ } +++ +++ public RISCV64JavaCallWrapper(Address addr) { +++ super(addr); +++ } +++ +++ public Address getLastJavaFP() { +++ return lastJavaFPField.getValue(addr.addOffsetTo(anchorField.getOffset())); +++ } +++} ++--- /dev/null +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/riscv64/RISCV64RegisterMap.java ++@@ -0,0 +1,53 @@ +++/* +++ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, Red Hat Inc. +++ * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. 
+++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ * +++ */ +++ +++package sun.jvm.hotspot.runtime.riscv64; +++ +++import sun.jvm.hotspot.debugger.*; +++import sun.jvm.hotspot.runtime.*; +++ +++public class RISCV64RegisterMap extends RegisterMap { +++ +++ /** This is the only public constructor */ +++ public RISCV64RegisterMap(JavaThread thread, boolean updateMap) { +++ super(thread, updateMap); +++ } +++ +++ protected RISCV64RegisterMap(RegisterMap map) { +++ super(map); +++ } +++ +++ public Object clone() { +++ RISCV64RegisterMap retval = new RISCV64RegisterMap(this); +++ return retval; +++ } +++ +++ // no PD state to clear or copy: +++ protected void clearPD() {} +++ protected void initializePD() {} +++ protected void initializeFromPD(RegisterMap map) {} +++ protected Address getLocationPD(VMReg reg) { return null; } +++} ++--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java +++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -50,7 +50,7 @@ public class PlatformInfo { ++ ++ public static boolean knownCPU(String cpu) { ++ final String[] KNOWN = ++- new String[] {"i386", "x86", "x86_64", "amd64", "ppc64", "ppc64le", "aarch64"}; +++ new String[] {"i386", "x86", "x86_64", "amd64", "ppc64", "ppc64le", "aarch64", "riscv64"}; ++ ++ for(String s : KNOWN) { ++ if(s.equals(cpu)) ++--- a/test/hotspot/jtreg/compiler/c2/TestBit.java +++++ b/test/hotspot/jtreg/compiler/c2/TestBit.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -33,7 +33,7 @@ import jdk.test.lib.process.ProcessTools ++ * @library /test/lib / ++ * ++ * @requires vm.flagless ++- * @requires os.arch=="aarch64" | os.arch=="amd64" | os.arch == "ppc64le" +++ * @requires os.arch=="aarch64" | os.arch=="amd64" | os.arch == "ppc64le" | os.arch == "riscv64" ++ * @requires vm.debug == true & vm.compiler2.enabled ++ * ++ * @run driver compiler.c2.TestBit ++@@ -55,7 +55,8 @@ public class TestBit { ++ String expectedTestBitInstruction = ++ "ppc64le".equals(System.getProperty("os.arch")) ? "ANDI" : ++ "aarch64".equals(System.getProperty("os.arch")) ? "tb" : ++- "amd64".equals(System.getProperty("os.arch")) ? "test" : null; +++ "amd64".equals(System.getProperty("os.arch")) ? "test" : +++ "riscv64".equals(System.getProperty("os.arch")) ? "andi" : null; ++ ++ if (expectedTestBitInstruction != null) { ++ output.shouldContain(expectedTestBitInstruction); ++--- /dev/null +++++ b/test/hotspot/jtreg/compiler/floatingpoint/TestLibmIntrinsics.java ++@@ -0,0 +1,80 @@ +++/* +++ * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2022, Alibaba Group Holding Limited. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ */ +++ +++/* +++ * @test +++ * @summary Test libm intrinsics +++ * @library /test/lib / +++ * +++ * @build jdk.test.whitebox.WhiteBox +++ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox +++ * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI +++ * -XX:-BackgroundCompilation -XX:-UseOnStackReplacement +++ * compiler.floatingpoint.TestLibmIntrinsics +++ */ +++ +++package compiler.floatingpoint; +++ +++import compiler.whitebox.CompilerWhiteBoxTest; +++import jdk.test.whitebox.WhiteBox; +++ +++import java.lang.reflect.Method; +++ +++public class TestLibmIntrinsics { +++ +++ private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); +++ +++ private static final double pi = 3.1415926; +++ +++ private static final double expected = 2.5355263553695413; +++ +++ static double m() { +++ return Math.pow(pi, Math.sin(Math.cos(Math.tan(Math.log(Math.log10(Math.exp(pi))))))); +++ } +++ +++ static public void main(String[] args) throws NoSuchMethodException { +++ Method test_method = compiler.floatingpoint.TestLibmIntrinsics.class.getDeclaredMethod("m"); +++ +++ double interpreter_result = m(); +++ +++ // Compile with C1 if possible +++ WHITE_BOX.enqueueMethodForCompilation(test_method, CompilerWhiteBoxTest.COMP_LEVEL_SIMPLE); +++ +++ double c1_result = m(); +++ +++ WHITE_BOX.deoptimizeMethod(test_method); +++ +++ // Compile it with C2 if possible +++ WHITE_BOX.enqueueMethodForCompilation(test_method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION); +++ +++ double c2_result = m(); +++ +++ if (interpreter_result != c1_result || +++ interpreter_result != c2_result || +++ c1_result != c2_result) { +++ System.out.println("interpreter = " + interpreter_result + " c1 = " + c1_result + " c2 = " + c2_result); +++ throw new RuntimeException("Test Failed"); +++ } +++ } +++} ++--- a/test/hotspot/jtreg/compiler/intrinsics/sha/cli/TestUseSHA1IntrinsicsOptionOnUnsupportedCPU.java +++++ b/test/hotspot/jtreg/compiler/intrinsics/sha/cli/TestUseSHA1IntrinsicsOptionOnUnsupportedCPU.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -39,6 +39,7 @@ package compiler.intrinsics.sha.cli; ++ ++ import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForOtherCPU; ++ import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForUnsupportedAArch64CPU; +++import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForUnsupportedRISCV64CPU; ++ import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForUnsupportedX86CPU; ++ import compiler.intrinsics.sha.cli.testcases.UseSHAIntrinsicsSpecificTestCaseForUnsupportedCPU; ++ ++@@ -49,6 +50,8 @@ public class TestUseSHA1IntrinsicsOption ++ DigestOptionsBase.USE_SHA1_INTRINSICS_OPTION), ++ new GenericTestCaseForUnsupportedAArch64CPU( ++ DigestOptionsBase.USE_SHA1_INTRINSICS_OPTION), +++ new GenericTestCaseForUnsupportedRISCV64CPU( +++ DigestOptionsBase.USE_SHA1_INTRINSICS_OPTION), ++ new UseSHAIntrinsicsSpecificTestCaseForUnsupportedCPU( ++ DigestOptionsBase.USE_SHA1_INTRINSICS_OPTION), ++ new GenericTestCaseForOtherCPU( ++--- a/test/hotspot/jtreg/compiler/intrinsics/sha/cli/TestUseSHA256IntrinsicsOptionOnUnsupportedCPU.java +++++ b/test/hotspot/jtreg/compiler/intrinsics/sha/cli/TestUseSHA256IntrinsicsOptionOnUnsupportedCPU.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. 
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -39,6 +39,7 @@ package compiler.intrinsics.sha.cli; ++ ++ import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForOtherCPU; ++ import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForUnsupportedAArch64CPU; +++import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForUnsupportedRISCV64CPU; ++ import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForUnsupportedX86CPU; ++ import compiler.intrinsics.sha.cli.testcases.UseSHAIntrinsicsSpecificTestCaseForUnsupportedCPU; ++ ++@@ -49,6 +50,8 @@ public class TestUseSHA256IntrinsicsOpti ++ DigestOptionsBase.USE_SHA256_INTRINSICS_OPTION), ++ new GenericTestCaseForUnsupportedAArch64CPU( ++ DigestOptionsBase.USE_SHA256_INTRINSICS_OPTION), +++ new GenericTestCaseForUnsupportedRISCV64CPU( +++ DigestOptionsBase.USE_SHA256_INTRINSICS_OPTION), ++ new UseSHAIntrinsicsSpecificTestCaseForUnsupportedCPU( ++ DigestOptionsBase.USE_SHA256_INTRINSICS_OPTION), ++ new GenericTestCaseForOtherCPU( ++--- a/test/hotspot/jtreg/compiler/intrinsics/sha/cli/TestUseSHA512IntrinsicsOptionOnUnsupportedCPU.java +++++ b/test/hotspot/jtreg/compiler/intrinsics/sha/cli/TestUseSHA512IntrinsicsOptionOnUnsupportedCPU.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -39,6 +39,7 @@ package compiler.intrinsics.sha.cli; ++ ++ import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForOtherCPU; ++ import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForUnsupportedAArch64CPU; +++import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForUnsupportedRISCV64CPU; ++ import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForUnsupportedX86CPU; ++ import compiler.intrinsics.sha.cli.testcases.UseSHAIntrinsicsSpecificTestCaseForUnsupportedCPU; ++ ++@@ -49,6 +50,8 @@ public class TestUseSHA512IntrinsicsOpti ++ DigestOptionsBase.USE_SHA512_INTRINSICS_OPTION), ++ new GenericTestCaseForUnsupportedAArch64CPU( ++ DigestOptionsBase.USE_SHA512_INTRINSICS_OPTION), +++ new GenericTestCaseForUnsupportedRISCV64CPU( +++ DigestOptionsBase.USE_SHA512_INTRINSICS_OPTION), ++ new UseSHAIntrinsicsSpecificTestCaseForUnsupportedCPU( ++ DigestOptionsBase.USE_SHA512_INTRINSICS_OPTION), ++ new GenericTestCaseForOtherCPU( ++--- a/test/hotspot/jtreg/compiler/intrinsics/sha/cli/TestUseSHAOptionOnUnsupportedCPU.java +++++ b/test/hotspot/jtreg/compiler/intrinsics/sha/cli/TestUseSHAOptionOnUnsupportedCPU.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -39,6 +39,7 @@ package compiler.intrinsics.sha.cli; ++ ++ import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForOtherCPU; ++ import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForUnsupportedAArch64CPU; +++import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForUnsupportedRISCV64CPU; ++ import compiler.intrinsics.sha.cli.testcases.GenericTestCaseForUnsupportedX86CPU; ++ import compiler.intrinsics.sha.cli.testcases.UseSHASpecificTestCaseForUnsupportedCPU; ++ ++@@ -49,6 +50,8 @@ public class TestUseSHAOptionOnUnsupport ++ DigestOptionsBase.USE_SHA_OPTION), ++ new GenericTestCaseForUnsupportedAArch64CPU( ++ DigestOptionsBase.USE_SHA_OPTION), +++ new GenericTestCaseForUnsupportedRISCV64CPU( +++ DigestOptionsBase.USE_SHA_OPTION), ++ new UseSHASpecificTestCaseForUnsupportedCPU( ++ DigestOptionsBase.USE_SHA_OPTION), ++ new GenericTestCaseForOtherCPU( ++--- a/test/hotspot/jtreg/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java +++++ b/test/hotspot/jtreg/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -32,7 +32,7 @@ import jdk.test.lib.cli.predicate.OrPred ++ ++ /** ++ * Generic test case for SHA-related options targeted to any CPU except ++- * AArch64, PPC, S390x, and X86. +++ * AArch64, RISCV64, PPC, S390x, and X86. ++ */ ++ public class GenericTestCaseForOtherCPU extends ++ DigestOptionsBase.TestCase { ++@@ -44,13 +44,14 @@ public class GenericTestCaseForOtherCPU ++ } ++ ++ public GenericTestCaseForOtherCPU(String optionName, boolean checkUseSHA) { ++- // Execute the test case on any CPU except AArch64, PPC, S390x, and X86. +++ // Execute the test case on any CPU except AArch64, RISCV64, PPC, S390x, and X86. ++ super(optionName, new NotPredicate( ++ new OrPredicate(Platform::isAArch64, +++ new OrPredicate(Platform::isRISCV64, ++ new OrPredicate(Platform::isS390x, ++ new OrPredicate(Platform::isPPC, ++ new OrPredicate(Platform::isX64, ++- Platform::isX86)))))); +++ Platform::isX86))))))); ++ ++ this.checkUseSHA = checkUseSHA; ++ } ++@@ -59,7 +60,7 @@ public class GenericTestCaseForOtherCPU ++ protected void verifyWarnings() throws Throwable { ++ String shouldPassMessage = String.format("JVM should start with " ++ + "option '%s' without any warnings", optionName); ++- // Verify that on non-x86 and non-AArch64 CPU usage of SHA-related +++ // Verify that on non-x86, non-RISCV64 and non-AArch64 CPU usage of SHA-related ++ // options will not cause any warnings. ++ CommandLineOptionTest.verifySameJVMStartup(null, ++ new String[] { ".*" + optionName + ".*" }, shouldPassMessage, ++--- /dev/null +++++ b/test/hotspot/jtreg/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForUnsupportedRISCV64CPU.java ++@@ -0,0 +1,115 @@ +++/* +++ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+++ * +++ * This code is free software; you can redistribute it and/or modify it +++ * under the terms of the GNU General Public License version 2 only, as +++ * published by the Free Software Foundation. +++ * +++ * This code is distributed in the hope that it will be useful, but WITHOUT +++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +++ * version 2 for more details (a copy is included in the LICENSE file that +++ * accompanied this code). +++ * +++ * You should have received a copy of the GNU General Public License version +++ * 2 along with this work; if not, write to the Free Software Foundation, +++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +++ * +++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +++ * or visit www.oracle.com if you need additional information or have any +++ * questions. +++ */ +++ +++package compiler.intrinsics.sha.cli.testcases; +++ +++import compiler.intrinsics.sha.cli.DigestOptionsBase; +++import jdk.test.lib.process.ExitCode; +++import jdk.test.lib.Platform; +++import jdk.test.lib.cli.CommandLineOptionTest; +++import jdk.test.lib.cli.predicate.AndPredicate; +++import jdk.test.lib.cli.predicate.NotPredicate; +++ +++/** +++ * Generic test case for SHA-related options targeted to RISCV64 CPUs +++ * which don't support instruction required by the tested option. +++ */ +++public class GenericTestCaseForUnsupportedRISCV64CPU extends +++ DigestOptionsBase.TestCase { +++ +++ final private boolean checkUseSHA; +++ +++ public GenericTestCaseForUnsupportedRISCV64CPU(String optionName) { +++ this(optionName, true); +++ } +++ +++ public GenericTestCaseForUnsupportedRISCV64CPU(String optionName, boolean checkUseSHA) { +++ super(optionName, new AndPredicate(Platform::isRISCV64, +++ new NotPredicate(DigestOptionsBase.getPredicateForOption( +++ optionName)))); +++ +++ this.checkUseSHA = checkUseSHA; +++ } +++ +++ @Override +++ protected void verifyWarnings() throws Throwable { +++ String shouldPassMessage = String.format("JVM startup should pass with" +++ + "option '-XX:-%s' without any warnings", optionName); +++ //Verify that option could be disabled without any warnings. +++ CommandLineOptionTest.verifySameJVMStartup(null, new String[] { +++ DigestOptionsBase.getWarningForUnsupportedCPU(optionName) +++ }, shouldPassMessage, shouldPassMessage, ExitCode.OK, +++ DigestOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS, +++ CommandLineOptionTest.prepareBooleanFlag(optionName, false)); +++ +++ if (checkUseSHA) { +++ shouldPassMessage = String.format("If JVM is started with '-XX:-" +++ + "%s' '-XX:+%s', output should contain warning.", +++ DigestOptionsBase.USE_SHA_OPTION, optionName); +++ +++ // Verify that when the tested option is enabled, then +++ // a warning will occur in VM output if UseSHA is disabled. +++ if (!optionName.equals(DigestOptionsBase.USE_SHA_OPTION)) { +++ CommandLineOptionTest.verifySameJVMStartup( +++ new String[] { DigestOptionsBase.getWarningForUnsupportedCPU(optionName) }, +++ null, +++ shouldPassMessage, +++ shouldPassMessage, +++ ExitCode.OK, +++ DigestOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS, +++ CommandLineOptionTest.prepareBooleanFlag(DigestOptionsBase.USE_SHA_OPTION, false), +++ CommandLineOptionTest.prepareBooleanFlag(optionName, true)); +++ } +++ } +++ } +++ +++ @Override +++ protected void verifyOptionValues() throws Throwable { +++ // Verify that option is disabled by default. 
+++ CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false", +++ String.format("Option '%s' should be disabled by default", +++ optionName), +++ DigestOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS); +++ +++ if (checkUseSHA) { +++ // Verify that option is disabled even if it was explicitly enabled +++ // using CLI options. +++ CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false", +++ String.format("Option '%s' should be off on unsupported " +++ + "RISCV64CPU even if set to true directly", optionName), +++ DigestOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS, +++ CommandLineOptionTest.prepareBooleanFlag(optionName, true)); +++ +++ // Verify that option is disabled when +UseSHA was passed to JVM. +++ CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false", +++ String.format("Option '%s' should be off on unsupported " +++ + "RISCV64CPU even if %s flag set to JVM", +++ optionName, CommandLineOptionTest.prepareBooleanFlag( +++ DigestOptionsBase.USE_SHA_OPTION, true)), +++ DigestOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS, +++ CommandLineOptionTest.prepareBooleanFlag( +++ DigestOptionsBase.USE_SHA_OPTION, true)); +++ } +++ } +++} ++--- a/test/hotspot/jtreg/compiler/loopopts/superword/ProdRed_Double.java +++++ b/test/hotspot/jtreg/compiler/loopopts/superword/ProdRed_Double.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -25,7 +25,7 @@ ++ * @test ++ * @bug 8074981 ++ * @summary Add C2 x86 Superword support for scalar product reduction optimizations : float test ++- * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" +++ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64" ++ * ++ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:LoopUnrollLimit=250 ++ * -XX:CompileThresholdScaling=0.1 ++--- a/test/hotspot/jtreg/compiler/loopopts/superword/ProdRed_Float.java +++++ b/test/hotspot/jtreg/compiler/loopopts/superword/ProdRed_Float.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -25,7 +25,7 @@ ++ * @test ++ * @bug 8074981 ++ * @summary Add C2 x86 Superword support for scalar product reduction optimizations : float test ++- * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" +++ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64" ++ * ++ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:LoopUnrollLimit=250 ++ * -XX:CompileThresholdScaling=0.1 ++--- a/test/hotspot/jtreg/compiler/loopopts/superword/ProdRed_Int.java +++++ b/test/hotspot/jtreg/compiler/loopopts/superword/ProdRed_Int.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -25,7 +25,7 @@ ++ * @test ++ * @bug 8074981 ++ * @summary Add C2 x86 Superword support for scalar product reduction optimizations : int test ++- * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" +++ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64" ++ * ++ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:LoopUnrollLimit=250 ++ * -XX:CompileThresholdScaling=0.1 ++--- a/test/hotspot/jtreg/compiler/loopopts/superword/ReductionPerf.java +++++ b/test/hotspot/jtreg/compiler/loopopts/superword/ReductionPerf.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -25,7 +25,7 @@ ++ * @test ++ * @bug 8074981 ++ * @summary Add C2 x86 Superword support for scalar product reduction optimizations : int test ++- * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" +++ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64" ++ * ++ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions ++ * -XX:LoopUnrollLimit=250 -XX:CompileThresholdScaling=0.1 ++--- a/test/hotspot/jtreg/compiler/loopopts/superword/SumRedAbsNeg_Double.java +++++ b/test/hotspot/jtreg/compiler/loopopts/superword/SumRedAbsNeg_Double.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -25,7 +25,7 @@ ++ * @test ++ * @bug 8138583 ++ * @summary Add C2 AArch64 Superword support for scalar sum reduction optimizations : double abs & neg test ++- * @requires os.arch=="aarch64" +++ * @requires os.arch=="aarch64" | os.arch=="riscv64" ++ * ++ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:LoopUnrollLimit=250 ++ * -XX:CompileThresholdScaling=0.1 ++--- a/test/hotspot/jtreg/compiler/loopopts/superword/SumRedAbsNeg_Float.java +++++ b/test/hotspot/jtreg/compiler/loopopts/superword/SumRedAbsNeg_Float.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -25,7 +25,7 @@ ++ * @test ++ * @bug 8138583 ++ * @summary Add C2 AArch64 Superword support for scalar sum reduction optimizations : float abs & neg test ++- * @requires os.arch=="aarch64" +++ * @requires os.arch=="aarch64" | os.arch=="riscv64" ++ * ++ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:LoopUnrollLimit=250 ++ * -XX:CompileThresholdScaling=0.1 ++--- a/test/hotspot/jtreg/compiler/loopopts/superword/SumRedSqrt_Double.java +++++ b/test/hotspot/jtreg/compiler/loopopts/superword/SumRedSqrt_Double.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. 
+++ * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -25,7 +25,7 @@ ++ * @test ++ * @bug 8135028 ++ * @summary Add C2 x86 Superword support for scalar sum reduction optimizations : double sqrt test ++- * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" +++ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64" ++ * ++ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:LoopUnrollLimit=250 ++ * -XX:CompileThresholdScaling=0.1 ++--- a/test/hotspot/jtreg/compiler/loopopts/superword/SumRed_Double.java +++++ b/test/hotspot/jtreg/compiler/loopopts/superword/SumRed_Double.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -25,7 +25,7 @@ ++ * @test ++ * @bug 8074981 ++ * @summary Add C2 x86 Superword support for scalar sum reduction optimizations : double test ++- * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" +++ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64" ++ * ++ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:LoopUnrollLimit=250 ++ * -XX:CompileThresholdScaling=0.1 ++--- a/test/hotspot/jtreg/compiler/loopopts/superword/SumRed_Float.java +++++ b/test/hotspot/jtreg/compiler/loopopts/superword/SumRed_Float.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -25,7 +25,7 @@ ++ * @test ++ * @bug 8074981 ++ * @summary Add C2 x86 Superword support for scalar sum reduction optimizations : float test ++- * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" +++ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64" ++ * ++ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:LoopUnrollLimit=250 ++ * -XX:CompileThresholdScaling=0.1 ++--- a/test/hotspot/jtreg/compiler/loopopts/superword/SumRed_Int.java +++++ b/test/hotspot/jtreg/compiler/loopopts/superword/SumRed_Int.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -25,7 +25,7 @@ ++ * @test ++ * @bug 8074981 ++ * @summary Add C2 x86 Superword support for scalar sum reduction optimizations : int test ++- * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" +++ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64" ++ * ++ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:LoopUnrollLimit=250 ++ * -XX:CompileThresholdScaling=0.1 ++--- a/test/hotspot/jtreg/compiler/runtime/TestConstantsInError.java +++++ b/test/hotspot/jtreg/compiler/runtime/TestConstantsInError.java ++@@ -130,7 +130,7 @@ public abstract class TestConstantsInErr ++ results.shouldMatch("Test_C1/.*::test \\(3 bytes\\)$") ++ .shouldMatch("Test_C2/.*::test \\(3 bytes\\)$"); ++ ++- if (isC1 && Platform.isAArch64()) { // no code patching +++ if (isC1 && (Platform.isAArch64() || Platform.isRISCV64())) { // no code patching ++ results.shouldMatch("Test_C1/.*::test \\(3 bytes\\) made not entrant") ++ .shouldMatch("Test_C2/.*::test \\(3 bytes\\) made not entrant"); ++ } else { ++@@ -168,7 +168,7 @@ public abstract class TestConstantsInErr ++ .shouldMatch("Test_MH3/.*::test \\(3 bytes\\)$") ++ .shouldMatch("Test_MH4/.*::test \\(3 bytes\\)$"); ++ ++- if (isC1 && Platform.isAArch64()) { // no code patching +++ if (isC1 && (Platform.isAArch64() || Platform.isRISCV64())) { // no code patching ++ results.shouldMatch("Test_MH1/.*::test \\(3 bytes\\) made not entrant") ++ .shouldMatch("Test_MH2/.*::test \\(3 bytes\\) made not entrant") ++ .shouldMatch("Test_MH3/.*::test \\(3 bytes\\) made not entrant") ++@@ -191,7 +191,7 @@ public abstract class TestConstantsInErr ++ results.shouldMatch("Test_MT1/.*::test \\(3 bytes\\)$") ++ .shouldMatch("Test_MT2/.*::test \\(3 bytes\\)$"); ++ ++- if (isC1 && Platform.isAArch64()) { // no code patching +++ if (isC1 && (Platform.isAArch64() || Platform.isRISCV64())) { // no code patching ++ results.shouldMatch("Test_MT1/.*::test \\(3 bytes\\) made not entrant") ++ .shouldMatch("Test_MT2/.*::test \\(3 bytes\\) made not entrant"); ++ } else { ++--- a/test/hotspot/jtreg/compiler/runtime/criticalnatives/argumentcorruption/CheckLongArgs.java +++++ b/test/hotspot/jtreg/compiler/runtime/criticalnatives/argumentcorruption/CheckLongArgs.java ++@@ -24,7 +24,7 @@ ++ ++ /* @test ++ * @bug 8167409 ++- * @requires (os.arch != "aarch64") & (os.arch != "arm") & (vm.flavor != "zero") +++ * @requires (os.arch != "aarch64") & (os.arch != "arm") & (os.arch != "riscv64") & (vm.flavor != "zero") ++ * @run main/othervm/native -Xcomp -XX:+CriticalJNINatives compiler.runtime.criticalnatives.argumentcorruption.CheckLongArgs ++ */ ++ package compiler.runtime.criticalnatives.argumentcorruption; ++--- a/test/hotspot/jtreg/compiler/runtime/criticalnatives/lookup/LookUp.java +++++ b/test/hotspot/jtreg/compiler/runtime/criticalnatives/lookup/LookUp.java ++@@ -24,7 +24,7 @@ ++ ++ /* @test ++ * @bug 8167408 ++- * @requires (os.arch != "aarch64") & (os.arch != "arm") & (vm.flavor != "zero") +++ * @requires (os.arch != "aarch64") & (os.arch != "arm") & (os.arch != "riscv64") & (vm.flavor != "zero") ++ * @run main/othervm/native -Xcomp -XX:+CriticalJNINatives compiler.runtime.criticalnatives.lookup.LookUp ++ */ ++ package compiler.runtime.criticalnatives.lookup; ++--- a/test/hotspot/jtreg/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java +++++ 
b/test/hotspot/jtreg/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -68,14 +68,16 @@ public class IntrinsicPredicates { ++ ++ public static final BooleanSupplier SHA1_INSTRUCTION_AVAILABLE ++ = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha1" }, null), +++ new OrPredicate(new CPUSpecificPredicate("riscv64.*", new String[] { "sha1" }, null), ++ new OrPredicate(new CPUSpecificPredicate("s390.*", new String[] { "sha1" }, null), ++ // x86 variants ++ new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "sha" }, null), ++ new OrPredicate(new CPUSpecificPredicate("i386.*", new String[] { "sha" }, null), ++- new CPUSpecificPredicate("x86.*", new String[] { "sha" }, null))))); +++ new CPUSpecificPredicate("x86.*", new String[] { "sha" }, null)))))); ++ ++ public static final BooleanSupplier SHA256_INSTRUCTION_AVAILABLE ++ = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha256" }, null), +++ new OrPredicate(new CPUSpecificPredicate("riscv64.*", new String[] { "sha256" }, null), ++ new OrPredicate(new CPUSpecificPredicate("s390.*", new String[] { "sha256" }, null), ++ new OrPredicate(new CPUSpecificPredicate("ppc64.*", new String[] { "sha" }, null), ++ new OrPredicate(new CPUSpecificPredicate("ppc64le.*", new String[] { "sha" }, null), ++@@ -84,10 +86,11 @@ public class IntrinsicPredicates { ++ new OrPredicate(new CPUSpecificPredicate("i386.*", new String[] { "sha" }, null), ++ new OrPredicate(new CPUSpecificPredicate("x86.*", new String[] { "sha" }, null), ++ new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "avx2", "bmi2" }, null), ++- new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null))))))))); +++ new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null)))))))))); ++ ++ public static final BooleanSupplier SHA512_INSTRUCTION_AVAILABLE ++ = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha512" }, null), +++ new OrPredicate(new CPUSpecificPredicate("riscv64.*", new String[] { "sha512" }, null), ++ new OrPredicate(new CPUSpecificPredicate("s390.*", new String[] { "sha512" }, null), ++ new OrPredicate(new CPUSpecificPredicate("ppc64.*", new String[] { "sha" }, null), ++ new OrPredicate(new CPUSpecificPredicate("ppc64le.*", new String[] { "sha" }, null), ++@@ -96,7 +99,7 @@ public class IntrinsicPredicates { ++ new OrPredicate(new CPUSpecificPredicate("i386.*", new String[] { "sha" }, null), ++ new OrPredicate(new CPUSpecificPredicate("x86.*", new String[] { "sha" }, null), ++ new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "avx2", "bmi2" }, null), ++- new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null))))))))); +++ new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null)))))))))); ++ ++ public static final BooleanSupplier SHA3_INSTRUCTION_AVAILABLE ++ // sha3 is only implemented on aarch64 for now ++--- a/test/hotspot/jtreg/runtime/NMT/CheckForProperDetailStackTrace.java +++++ b/test/hotspot/jtreg/runtime/NMT/CheckForProperDetailStackTrace.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. 
+++ * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -133,7 +133,7 @@ public class CheckForProperDetailStackTr ++ // It's ok for ARM not to have symbols, because it does not support NMT detail ++ // when targeting thumb2. It's also ok for Windows not to have symbols, because ++ // they are only available if the symbols file is included with the build. ++- if (Platform.isWindows() || Platform.isARM()) { +++ if (Platform.isWindows() || Platform.isARM() || Platform.isRISCV64()) { ++ return; // we are done ++ } ++ output.reportDiagnosticSummary(); ++--- a/test/hotspot/jtreg/runtime/ReservedStack/ReservedStackTest.java +++++ b/test/hotspot/jtreg/runtime/ReservedStack/ReservedStackTest.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -240,7 +240,7 @@ public class ReservedStackTest { ++ return Platform.isAix() || ++ (Platform.isLinux() && ++ (Platform.isPPC() || Platform.isS390x() || Platform.isX64() || ++- Platform.isX86() || Platform.isAArch64())) || +++ Platform.isX86() || Platform.isAArch64() || Platform.isRISCV64())) || ++ Platform.isOSX(); ++ } ++ ++--- a/test/hotspot/jtreg/serviceability/AsyncGetCallTrace/MyPackage/ASGCTBaseTest.java +++++ b/test/hotspot/jtreg/serviceability/AsyncGetCallTrace/MyPackage/ASGCTBaseTest.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2019, Google and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++@@ -29,7 +29,7 @@ package MyPackage; ++ * @summary Verifies that AsyncGetCallTrace is call-able and provides sane information. ++ * @compile ASGCTBaseTest.java ++ * @requires os.family == "linux" ++- * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="arm" | os.arch=="aarch64" | os.arch=="ppc64" | os.arch=="s390" +++ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="arm" | os.arch=="aarch64" | os.arch=="ppc64" | os.arch=="s390" | os.arch=="riscv64" ++ * @requires vm.jvmti ++ * @run main/othervm/native -agentlib:AsyncGetCallTraceTest MyPackage.ASGCTBaseTest ++ */ ++--- a/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/ArgumentHandler.java +++++ b/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/ArgumentHandler.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -529,6 +529,7 @@ class CheckedFeatures { ++ {"linux-ppc64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"linux-ppc64le", "com.sun.jdi.SharedMemoryAttach"}, ++ {"linux-s390x", "com.sun.jdi.SharedMemoryAttach"}, +++ {"linux-riscv64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"macosx-amd64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"mac-x64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"macosx-aarch64", "com.sun.jdi.SharedMemoryAttach"}, ++@@ -554,6 +555,7 @@ class CheckedFeatures { ++ {"linux-ppc64", "com.sun.jdi.SharedMemoryListen"}, ++ {"linux-ppc64le", "com.sun.jdi.SharedMemoryListen"}, ++ {"linux-s390x", "com.sun.jdi.SharedMemoryListen"}, +++ {"linux-riscv64", "com.sun.jdi.SharedMemoryListen"}, ++ {"macosx-amd64", "com.sun.jdi.SharedMemoryListen"}, ++ {"mac-x64", "com.sun.jdi.SharedMemoryListen"}, ++ {"macosx-aarch64", "com.sun.jdi.SharedMemoryListen"}, ++@@ -600,6 +602,9 @@ class CheckedFeatures { ++ {"linux-s390x", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"linux-s390x", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ +++ {"linux-riscv64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +++ {"linux-riscv64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, +++ ++ {"windows-i586", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, ++ {"windows-i586", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, ++ ++@@ -637,6 +642,7 @@ class CheckedFeatures { ++ {"linux-ppc64", "dt_shmem"}, ++ {"linux-ppc64le", "dt_shmem"}, ++ {"linux-s390x", "dt_shmem"}, +++ {"linux-riscv64", "dt_shmem"}, ++ {"macosx-amd64", "dt_shmem"}, ++ {"mac-x64", "dt_shmem"}, ++ {"macosx-aarch64", "dt_shmem"}, ++--- a/test/jdk/jdk/jfr/event/os/TestCPUInformation.java +++++ b/test/jdk/jdk/jfr/event/os/TestCPUInformation.java ++@@ -1,5 +1,5 @@ ++ /* ++- * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved. +++ * Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++@@ -52,8 +52,8 @@ public class TestCPUInformation { ++ Events.assertField(event, "hwThreads").atLeast(1); ++ Events.assertField(event, "cores").atLeast(1); ++ Events.assertField(event, "sockets").atLeast(1); ++- Events.assertField(event, "cpu").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "s390"); ++- Events.assertField(event, "description").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "s390"); +++ Events.assertField(event, "cpu").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "RISCV64", "s390"); +++ Events.assertField(event, "description").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "RISCV64", "s390"); ++ } ++ } ++ } ++--- a/test/lib-test/jdk/test/lib/TestMutuallyExclusivePlatformPredicates.java +++++ b/test/lib-test/jdk/test/lib/TestMutuallyExclusivePlatformPredicates.java ++@@ -45,7 +45,7 @@ import java.util.Set; ++ */ ++ public class TestMutuallyExclusivePlatformPredicates { ++ private static enum MethodGroup { ++- ARCH("isAArch64", "isARM", "isPPC", "isS390x", "isX64", "isX86"), +++ ARCH("isAArch64", "isARM", "isRISCV64", "isPPC", "isS390x", "isX64", "isX86"), ++ BITNESS("is32bit", "is64bit"), ++ OS("isAix", "isLinux", "isOSX", "isWindows"), ++ VM_TYPE("isClient", "isServer", "isMinimal", "isZero", "isEmbedded"), ++--- a/test/lib/jdk/test/lib/Platform.java +++++ b/test/lib/jdk/test/lib/Platform.java ++@@ -210,6 +210,10 @@ public class Platform { ++ return isArch("arm.*"); ++ } ++ +++ public static boolean isRISCV64() { +++ return isArch("riscv64"); +++ } +++ ++ public static boolean isPPC() { ++ return isArch("ppc.*"); ++ } diff --cc debian/patches/Don-t-optimize-fdlibm-fork-for-Zero-on-linux-sparc-Z.patch index 000000000,000000000..6f1d7ee45 new file mode 100644 --- /dev/null +++ b/debian/patches/Don-t-optimize-fdlibm-fork-for-Zero-on-linux-sparc-Z.patch @@@ -1,0 -1,0 +1,27 @@@ ++From 7ce2609d6ff5299f2ed3ff6850a3cab4a16125f8 Mon Sep 17 00:00:00 2001 ++From: John Paul Adrian Glaubitz ++Date: Fri, 21 Dec 2018 15:18:17 +0300 ++Subject: [PATCH] Don't optimize fdlibm-fork for Zero on linux-sparc (Zero) ++ ++--- ++ make/hotspot/lib/JvmOverrideFiles.gmk | 9 +++++++++ ++ 1 file changed, 9 insertions(+) ++ ++--- a/make/hotspot/lib/JvmOverrideFiles.gmk +++++ b/make/hotspot/lib/JvmOverrideFiles.gmk ++@@ -48,6 +48,15 @@ ++ # by using -ffp-contract=off on GCC/Clang platforms. 
++ ifneq ($(FDLIBM_CFLAGS), )
++ LIBJVM_FDLIBM_COPY_OPT_FLAG := $(CXX_O_FLAG_NORM)
+++ # Don't optimize fdlibm-fork for Zero on Linux sparc
+++ # See JDK-XXXX
+++ ifeq ($(call check-jvm-feature, zero), true)
+++ ifeq ($(OPENJDK_TARGET_OS), linux)
+++ ifeq ($(OPENJDK_TARGET_CPU_ARCH), sparc)
+++ LIBJVM_FDLIBM_COPY_OPT_FLAG := $(CXX_O_FLAG_NONE)
+++ endif
+++ endif
+++ endif
++ endif
++
++ ifeq ($(call isTargetOs, linux), true) diff --cc debian/patches/adlc-parser.diff index 000000000,000000000..096aadb92 new file mode 100644 --- /dev/null +++ b/debian/patches/adlc-parser.diff @@@ -1,0 -1,0 +1,22 @@@
++# DP: fixes an uninitialized memory issue in adlc
++--- a/src/hotspot/share/adlc/formsopt.cpp
+++++ b/src/hotspot/share/adlc/formsopt.cpp
++@@ -436,6 +436,10 @@
++ //==============================Frame Handling=================================
++ //------------------------------FrameForm--------------------------------------
++ FrameForm::FrameForm() {
+++ _sync_stack_slots = NULL;
+++ _inline_cache_reg = NULL;
+++ _interpreter_frame_pointer_reg = NULL;
+++ _cisc_spilling_operand_name = NULL;
++ _frame_pointer = NULL;
++ _c_frame_pointer = NULL;
++ _alignment = NULL;
++@@ -444,7 +448,6 @@
++ _varargs_C_out_slots_killed = NULL;
++ _return_value = NULL;
++ _c_return_value = NULL;
++- _interpreter_frame_pointer_reg = NULL;
++ }
++
++ FrameForm::~FrameForm() { diff --cc debian/patches/atk-wrapper-security.diff index 000000000,000000000..6f35f6f45 new file mode 100644 --- /dev/null +++ b/debian/patches/atk-wrapper-security.diff @@@ -1,0 -1,0 +1,10 @@@
++--- a/src/java.base/share/conf/security/java.security
+++++ b/src/java.base/share/conf/security/java.security
++@@ -310,6 +310,7 @@
++ #
++ package.access=sun.misc.,\
++ sun.reflect.,\
+++ org.GNOME.Accessibility.,\
++
++ #
++ # List of comma-separated packages that start with or equal this string diff --cc debian/patches/build_gtest.patch index 000000000,000000000..0d7f6eacf new file mode 100644 --- /dev/null +++ b/debian/patches/build_gtest.patch @@@ -1,0 -1,0 +1,29 @@@
++Description: Add support for building with the system google test
++ A major portion of hotspot tests require google test.
++ In order to get tier1 tests passing, the package should build
++ with the system google test.
++Author: Vladimir Petko ++Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/openjdk-20/+bug/2012316 ++Last-Update: 2023-04-13 ++--- a/make/autoconf/lib-tests.m4 +++++ b/make/autoconf/lib-tests.m4 ++@@ -36,6 +36,8 @@ ++ AC_MSG_CHECKING([for gtest]) ++ if test "x${with_gtest}" = xno; then ++ AC_MSG_RESULT([no, disabled]) +++ elif test "x${with_gtest}" = xsystem; then +++ GTEST_FRAMEWORK_SRC=/usr/src/googletest ++ elif test "x${with_gtest}" = xyes; then ++ AC_MSG_RESULT([no, error]) ++ AC_MSG_ERROR([--with-gtest must have a value]) ++--- a/test/hotspot/gtest/gtestMain.cpp +++++ b/test/hotspot/gtest/gtestMain.cpp ++@@ -230,7 +230,7 @@ ++ bool is_vmassert_test = false; ++ bool is_othervm_test = false; ++ // death tests facility is used for both regular death tests, other vm and vmassert tests ++- if (::testing::internal::GTEST_FLAG(internal_run_death_test).length() > 0) { +++ if (::testing::GTEST_FLAG(internal_run_death_test).length() > 0) { ++ // when we execute death test, filter value equals to test name ++ const char* test_name = ::testing::GTEST_FLAG(filter).c_str(); ++ const char* const othervm_suffix = "_other_vm"; // TEST_OTHER_VM diff --cc debian/patches/default-jvm-cfg.diff index 000000000,000000000..d145cc1a9 new file mode 100644 --- /dev/null +++ b/debian/patches/default-jvm-cfg.diff @@@ -1,0 -1,0 +1,23 @@@ ++--- a/src/java.base/share/native/libjli/java.c +++++ b/src/java.base/share/native/libjli/java.c ++@@ -2000,7 +2000,7 @@ ++ ReadKnownVMs(const char *jvmCfgName, jboolean speculative) ++ { ++ FILE *jvmCfg; ++- char line[MAXPATHLEN+20]; +++ char line[MAXPATHLEN+30]; ++ int cnt = 0; ++ int lineno = 0; ++ jlong start = 0, end = 0; ++@@ -2015,6 +2015,11 @@ ++ ++ jvmCfg = fopen(jvmCfgName, "r"); ++ if (jvmCfg == NULL) { +++ char cfgName[strlen(jvmCfgName)+10]; +++ sprintf(cfgName, "%s-default", jvmCfgName); +++ jvmCfg = fopen(cfgName, "r"); +++ } +++ if (jvmCfg == NULL) { ++ if (!speculative) { ++ JLI_ReportErrorMessage(CFG_ERROR6, jvmCfgName); ++ exit(1); diff --cc debian/patches/dnd-files.diff index 000000000,000000000..0e6b09beb new file mode 100644 --- /dev/null +++ b/debian/patches/dnd-files.diff @@@ -1,0 -1,0 +1,20 @@@ ++--- a/src/java.desktop/unix/classes/sun/awt/X11/XDataTransferer.java +++++ b/src/java.desktop/unix/classes/sun/awt/X11/XDataTransferer.java ++@@ -273,14 +273,14 @@ ++ BufferedReader reader = new BufferedReader(isr)) { ++ String line; ++ ArrayList uriList = new ArrayList<>(); ++- URI uri; ++ while ((line = reader.readLine()) != null) { ++ try { ++- uri = new URI(line); +++ if (!"\0".equals(line)) { +++ uriList.add(new URI(line)); +++ } ++ } catch (URISyntaxException uriSyntaxException) { ++ throw new IOException(uriSyntaxException); ++ } ++- uriList.add(uri); ++ } ++ return uriList.toArray(new URI[uriList.size()]); ++ } diff --cc debian/patches/exclude-broken-tests.patch index 000000000,000000000..82f4238a9 new file mode 100644 --- /dev/null +++ b/debian/patches/exclude-broken-tests.patch @@@ -1,0 -1,0 +1,104 @@@ ++ Description: Disable failing tests ++ Disable tests that fail on Ubuntu pending investigation ++Author: Vladimir Petko ++Forwarded: not-needed ++Last-Update: 2023-03-24 ++--- a/test/hotspot/jtreg/ProblemList.txt +++++ b/test/hotspot/jtreg/ProblemList.txt ++@@ -38,6 +38,22 @@ ++ # ++ ############################################################################# ++ +++# tests that need to be investigated +++ +++gc/shenandoah/mxbeans/TestChurnNotifications.java#aggressive 0000000 generic-all +++gc/shenandoah/mxbeans/TestChurnNotifications.java#iu 
0000000 generic-all
+++
+++#result: Failed. Execution failed: `main' threw exception: java.io.IOException: Mount point not found
+++# https://bugs.openjdk.org/browse/JDK-8166162
+++
+++runtime/LoadClass/LongBCP.java 8166162 generic-all
+++
+++# Intermittent failures because LingeredApp.class is not found
+++
+++runtime/cds/appcds/jcmd/JCmdTestDynamicDump.java 000000 generic-all
+++runtime/cds/appcds/jcmd/JCmdTestFileSafety.java 000000 generic-all
+++runtime/cds/appcds/jcmd/JCmdTestStaticDump.java 000000 generic-all
+++
++ # :hotspot_compiler
++
++ compiler/ciReplay/TestSAServer.java 8029528 generic-all
++--- a/test/langtools/ProblemList.txt
+++++ b/test/langtools/ProblemList.txt
++@@ -23,6 +23,11 @@
++ #
++ ###########################################################################
++
+++# Wrong test environment in autopkgtest
+++# java.lang.IllegalStateException: Launching JShell execution engine threw: ERROR: transport error 202: getaddrinfo: failed to parse address
+++
+++jdk/jshell/JdiListeningLocalhostExecutionControlTest.java 0000000 generic-all
+++
++ ###########################################################################
++ #
++ # javadoc
++--- a/test/jdk/ProblemList.txt
+++++ b/test/jdk/ProblemList.txt
++@@ -109,6 +109,49 @@
++ #
++ #############################################################################
++
+++
+++
+++# to investigate
+++# unable to read LD_LIBRARY_PATH env variable
+++
+++tools/jpackage/share/AppLauncherEnvTest.java 0000000 generic-all
+++
+++# java.lang.Exception: Proc abnormal end
+++
+++sun/security/krb5/auto/Cleaners.java 0000000 generic-all
+++
+++# result: Failed. Execution failed: `main' threw exception: java.io.IOException: Mount point not found
+++# https://bugs.openjdk.org/browse/JDK-8166162
+++
+++java/io/File/createTempFile/TargetDirectory.java 8166162 generic-all
+++java/nio/file/Files/CheckPermissions.java 8166162 generic-all
+++java/nio/file/Files/TemporaryFiles.java 8166162 generic-all
+++java/nio/file/Files/Misc.java 8166162 generic-all
+++java/nio/file/Files/CopyAndMove.java 8166162 generic-all
+++java/nio/file/Files/InterruptCopy.java 8166162 generic-all
+++java/nio/file/Files/FileAttributes.java 8166162 generic-all
+++java/nio/file/attribute/BasicFileAttributeView/SetTimesNanos.java 8166162 generic-all
+++java/nio/file/attribute/AclFileAttributeView/Basic.java 8166162 generic-all
+++java/nio/file/attribute/DosFileAttributeView/Basic.java 8166162 generic-all
+++java/nio/file/attribute/PosixFileAttributeView/Basic.java 8166162 generic-all
+++java/nio/file/attribute/UserDefinedFileAttributeView/Basic.java 8166162 generic-all
+++java/nio/file/FileStore/Basic.java 8166162 generic-all
+++java/nio/channels/FileChannel/directio/DirectIOTest.java 8166162 generic-all
+++java/nio/channels/FileChannel/directio/ReadDirect.java 8166162 generic-all
+++java/nio/channels/FileChannel/directio/PreadDirect.java 8166162 generic-all
+++java/nio/channels/FileChannel/directio/PwriteDirect.java 8166162 generic-all
+++java/nio/channels/FileChannel/directio/WriteDirect.java 8166162 generic-all
+++
+++# utf-8 locale is missing on build machine
+++# https://bugs.openjdk.org/browse/JDK-8249079
+++
+++java/lang/invoke/lambda/LambdaFileEncodingSerialization.java 8249079 generic-all
+++
+++# reproducible-properties-timestamp.diff introduces a bug that breaks this test
+++# remove after the patch is dropped
+++
+++java/util/logging/LogManager/Configuration/updateConfiguration/SimpleUpdateConfigWithInputStreamTest.java 000000 generic-all
+++
++
############################################################################ ++ ++ # jdk_awt ++@@ -537,7 +580,6 @@ ++ java/lang/ProcessHandle/InfoTest.java 8211847 aix-ppc64 ++ java/lang/invoke/LFCaching/LFMultiThreadCachingTest.java 8151492 generic-all ++ java/lang/invoke/LFCaching/LFGarbageCollectedTest.java 8078602 generic-all ++-java/lang/invoke/lambda/LambdaFileEncodingSerialization.java 8249079 linux-x64 ++ ++ ############################################################################ ++ diff --cc debian/patches/hotspot-disable-exec-shield-workaround.diff index 000000000,000000000..669b91145 new file mode 100644 --- /dev/null +++ b/debian/patches/hotspot-disable-exec-shield-workaround.diff @@@ -1,0 -1,0 +1,11 @@@ ++--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp +++++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp ++@@ -657,7 +657,7 @@ ++ * updates (JDK-8023956). ++ */ ++ void os::workaround_expand_exec_shield_cs_limit() { ++-#if defined(IA32) +++#if 0 ++ assert(Linux::initial_thread_stack_bottom() != NULL, "sanity"); ++ size_t page_size = os::vm_page_size(); ++ diff --cc debian/patches/hotspot-mips-align.diff index 000000000,000000000..f60a5ed62 new file mode 100644 --- /dev/null +++ b/debian/patches/hotspot-mips-align.diff @@@ -1,0 -1,0 +1,11 @@@ ++--- a/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp +++++ b/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp ++@@ -426,7 +426,7 @@ ++ ThreadStateTransition::transition_from_java(thread, _thread_in_native); ++ ++ // Make the call ++- intptr_t result[4 - LogBytesPerWord]; +++ intptr_t result[4 - LogBytesPerWord] __attribute__((__aligned__(__alignof__(double)))); ++ ffi_call(handler->cif(), (void (*)()) function, result, arguments); ++ ++ // Change the thread state back to _thread_in_Java and ensure it diff --cc debian/patches/icc_loading_with_symlink.diff index 000000000,000000000..9bbf1dd7d new file mode 100644 --- /dev/null +++ b/debian/patches/icc_loading_with_symlink.diff @@@ -1,0 -1,0 +1,12 @@@ ++--- a/src/java.desktop/share/classes/java/awt/color/ICC_Profile.java +++++ b/src/java.desktop/share/classes/java/awt/color/ICC_Profile.java ++@@ -1327,9 +1327,6 @@ ++ dir = st.nextToken(); ++ fullPath = dir + File.separatorChar + fileName; ++ f = new File(fullPath); ++- if (!isChildOf(f, dir)) { ++- f = null; ++- } ++ } ++ } ++ diff --cc debian/patches/icedtea-override-redirect-compiz.diff index 000000000,000000000..fe5da60eb new file mode 100644 --- /dev/null +++ b/debian/patches/icedtea-override-redirect-compiz.diff @@@ -1,0 -1,0 +1,10 @@@ ++--- a/src/java.desktop/unix/classes/sun/awt/X11/XWindowPeer.java +++++ b/src/java.desktop/unix/classes/sun/awt/X11/XWindowPeer.java ++@@ -1260,6 +1260,7 @@ ++ ++ boolean isOverrideRedirect() { ++ return XWM.getWMID() == XWM.OPENLOOK_WM || +++ XWM.getWMID() == XWM.COMPIZ_WM || ++ Window.Type.POPUP.equals(getWindowType()); ++ } ++ diff --cc debian/patches/jdk-getAccessibleValue.diff index 000000000,000000000..22753e2ab new file mode 100644 --- /dev/null +++ b/debian/patches/jdk-getAccessibleValue.diff @@@ -1,0 -1,0 +1,166 @@@ ++--- a/src/java.desktop/share/classes/javax/swing/JTree.java +++++ b/src/java.desktop/share/classes/javax/swing/JTree.java ++@@ -5133,7 +5133,7 @@ ++ public AccessibleSelection getAccessibleSelection() { ++ AccessibleContext ac = getCurrentAccessibleContext(); ++ if (ac != null && isLeaf) { ++- return getCurrentAccessibleContext().getAccessibleSelection(); +++ return ac.getAccessibleSelection(); ++ } else { ++ return this; ++ } ++@@ -5148,7 +5148,7 @@ ++ public AccessibleText 
getAccessibleText() { ++ AccessibleContext ac = getCurrentAccessibleContext(); ++ if (ac != null) { ++- return getCurrentAccessibleContext().getAccessibleText(); +++ return ac.getAccessibleText(); ++ } else { ++ return null; ++ } ++@@ -5163,7 +5163,7 @@ ++ public AccessibleValue getAccessibleValue() { ++ AccessibleContext ac = getCurrentAccessibleContext(); ++ if (ac != null) { ++- return getCurrentAccessibleContext().getAccessibleValue(); +++ return ac.getAccessibleValue(); ++ } else { ++ return null; ++ } ++--- a/src/java.desktop/share/classes/javax/swing/table/JTableHeader.java +++++ b/src/java.desktop/share/classes/javax/swing/table/JTableHeader.java ++@@ -1143,7 +1143,12 @@ ++ } ++ ++ public AccessibleAction getAccessibleAction() { ++- return getCurrentAccessibleContext().getAccessibleAction(); +++ AccessibleContext ac = getCurrentAccessibleContext(); +++ if (ac != null) { +++ return ac.getAccessibleAction(); +++ } else { +++ return null; +++ } ++ } ++ ++ /** ++@@ -1159,15 +1164,30 @@ ++ } ++ ++ public AccessibleSelection getAccessibleSelection() { ++- return getCurrentAccessibleContext().getAccessibleSelection(); +++ AccessibleContext ac = getCurrentAccessibleContext(); +++ if (ac != null) { +++ return ac.getAccessibleSelection(); +++ } else { +++ return null; +++ } ++ } ++ ++ public AccessibleText getAccessibleText() { ++- return getCurrentAccessibleContext().getAccessibleText(); +++ AccessibleContext ac = getCurrentAccessibleContext(); +++ if (ac != null) { +++ return ac.getAccessibleText(); +++ } else { +++ return null; +++ } ++ } ++ ++ public AccessibleValue getAccessibleValue() { ++- return getCurrentAccessibleContext().getAccessibleValue(); +++ AccessibleContext ac = getCurrentAccessibleContext(); +++ if (ac != null) { +++ return ac.getAccessibleValue(); +++ } else { +++ return null; +++ } ++ } ++ ++ ++--- a/src/java.desktop/share/classes/javax/swing/JTable.java +++++ b/src/java.desktop/share/classes/javax/swing/JTable.java ++@@ -9223,7 +9223,12 @@ ++ * @return the AccessibleAction, or null ++ */ ++ public AccessibleAction getAccessibleAction() { ++- return getCurrentAccessibleContext().getAccessibleAction(); +++ AccessibleContext ac = getCurrentAccessibleContext(); +++ if (ac != null) { +++ return ac.getAccessibleAction(); +++ } else { +++ return null; +++ } ++ } ++ ++ /** ++@@ -9245,7 +9250,12 @@ ++ * null ++ */ ++ public AccessibleSelection getAccessibleSelection() { ++- return getCurrentAccessibleContext().getAccessibleSelection(); +++ AccessibleContext ac = getCurrentAccessibleContext(); +++ if (ac != null) { +++ return ac.getAccessibleSelection(); +++ } else { +++ return null; +++ } ++ } ++ ++ /** ++@@ -9255,7 +9265,12 @@ ++ * @return the AccessibleText, or null ++ */ ++ public AccessibleText getAccessibleText() { ++- return getCurrentAccessibleContext().getAccessibleText(); +++ AccessibleContext ac = getCurrentAccessibleContext(); +++ if (ac != null) { +++ return ac.getAccessibleText(); +++ } else { +++ return null; +++ } ++ } ++ ++ /** ++@@ -9265,7 +9280,12 @@ ++ * @return the AccessibleValue, or null ++ */ ++ public AccessibleValue getAccessibleValue() { ++- return getCurrentAccessibleContext().getAccessibleValue(); +++ AccessibleContext ac = getCurrentAccessibleContext(); +++ if (ac != null) { +++ return ac.getAccessibleValue(); +++ } else { +++ return null; +++ } ++ } ++ ++ ++--- a/src/jdk.accessibility/windows/classes/com/sun/java/accessibility/internal/AccessBridge.java +++++ 
b/src/jdk.accessibility/windows/classes/com/sun/java/accessibility/internal/AccessBridge.java ++@@ -6562,7 +6562,7 @@ ++ public AccessibleSelection getAccessibleSelection() { ++ AccessibleContext ac = getCurrentAccessibleContext(); ++ if (ac != null && isLeaf) { ++- return getCurrentAccessibleContext().getAccessibleSelection(); +++ return ac.getAccessibleSelection(); ++ } else { ++ return this; ++ } ++@@ -6577,7 +6577,7 @@ ++ public AccessibleText getAccessibleText() { ++ AccessibleContext ac = getCurrentAccessibleContext(); ++ if (ac != null) { ++- return getCurrentAccessibleContext().getAccessibleText(); +++ return ac.getAccessibleText(); ++ } else { ++ return null; ++ } ++@@ -6592,7 +6592,7 @@ ++ public AccessibleValue getAccessibleValue() { ++ AccessibleContext ac = getCurrentAccessibleContext(); ++ if (ac != null) { ++- return getCurrentAccessibleContext().getAccessibleValue(); +++ return ac.getAccessibleValue(); ++ } else { ++ return null; ++ } diff --cc debian/patches/jdk-i18n-pt_BR.diff index 000000000,000000000..59e24653f new file mode 100644 --- /dev/null +++ b/debian/patches/jdk-i18n-pt_BR.diff @@@ -1,0 -1,0 +1,11 @@@ ++--- a/src/java.desktop/share/classes/sun/awt/resources/awt_pt_BR.properties +++++ b/src/java.desktop/share/classes/sun/awt/resources/awt_pt_BR.properties ++@@ -38,7 +38,7 @@ ++ # Key names ++ AWT.enter=Enter ++ AWT.backSpace=Backspace ++-AWT.tab=Guia +++AWT.tab=Tab ++ AWT.cancel=Cancelar ++ AWT.clear=Limpar ++ AWT.pause=Pausar diff --cc debian/patches/jtreg-location.diff index 000000000,000000000..cfbf84896 new file mode 100644 --- /dev/null +++ b/debian/patches/jtreg-location.diff @@@ -1,0 -1,0 +1,51 @@@ ++--- a/make/autoconf/toolchain.m4 +++++ b/make/autoconf/toolchain.m4 ++@@ -1029,7 +1029,7 @@ ++ AC_MSG_ERROR([jtreg home directory from --with-jtreg=$with_jtreg does not exist]) ++ fi ++ ++- if test ! -e "$JT_HOME/lib/jtreg.jar"; then +++ if test ! -e "$JT_HOME/lib/jtreg.jar" && test ! -e "$JT_HOME/share/java/jtreg.jar"; then ++ AC_MSG_ERROR([jtreg home directory from --with-jtreg=$with_jtreg is not a valid jtreg home]) ++ fi ++ ++@@ -1043,7 +1043,7 @@ ++ AC_MSG_WARN([Ignoring JT_HOME pointing to invalid directory: $JT_HOME]) ++ JT_HOME= ++ else ++- if test ! -e "$JT_HOME/lib/jtreg.jar"; then +++ if test ! -e "$JT_HOME/lib/jtreg.jar" && test ! -e "$JT_HOME/share/java/jtreg.jar"; then ++ AC_MSG_WARN([Ignoring JT_HOME which is not a valid jtreg home: $JT_HOME]) ++ JT_HOME= ++ else ++@@ -1059,7 +1059,7 @@ ++ if test "x$JTREGEXE" != x; then ++ # That's good, now try to derive JT_HOME ++ JT_HOME=`(cd $($DIRNAME $JTREGEXE)/.. && pwd)` ++- if test ! -e "$JT_HOME/lib/jtreg.jar"; then +++ if test ! -e "$JT_HOME/lib/jtreg.jar" && test ! 
-e "$JT_HOME/share/java/jtreg.jar"; then ++ AC_MSG_WARN([Ignoring jtreg from path since a valid jtreg home cannot be found]) ++ JT_HOME= ++ else ++--- a/make/RunTests.gmk +++++ b/make/RunTests.gmk ++@@ -846,7 +846,7 @@ ++ ++ $1_COMMAND_LINE := \ ++ $$(JAVA) $$($1_JTREG_LAUNCHER_OPTIONS) \ ++- -Dprogram=jtreg -jar $$(JT_HOME)/lib/jtreg.jar \ +++ -Dprogram=jtreg -jar $$(if $$(wildcard $$(JT_HOME)/lib/jtreg.jar),$$(JT_HOME)/lib/jtreg.jar,$$(JT_HOME)/share/java/jtreg.jar) \ ++ $$($1_JTREG_BASIC_OPTIONS) \ ++ -testjdk:$$(JDK_UNDER_TEST) \ ++ -dir:$$(JTREG_TOPDIR) \ ++--- a/make/test/BuildFailureHandler.gmk +++++ b/make/test/BuildFailureHandler.gmk ++@@ -38,7 +38,7 @@ ++ FH_SUPPORT := $(SUPPORT_OUTPUTDIR)/test/failure_handler ++ FH_JAR := $(FH_SUPPORT)/jtregFailureHandler.jar ++ ++-JTREG_JAR := $(JT_HOME)/lib/jtreg.jar +++JTREG_JAR := $(if $(wildcard $(JT_HOME)/lib/jtreg.jar),$(JT_HOME)/lib/jtreg.jar,$(JT_HOME)/share/java/jtreg.jar) ++ ifeq ($(wildcard $(JTREG_JAR)), ) ++ $(error Cannot build failure handler without jtreg) ++ endif diff --cc debian/patches/keep-gtk2-as-default.patch index 000000000,000000000..b65eaa1f0 new file mode 100644 --- /dev/null +++ b/debian/patches/keep-gtk2-as-default.patch @@@ -1,0 -1,0 +1,55 @@@ ++Description: Revert upstream default to GTK2 ++ OpenJDK 11 uses GTK3 by default for the GTK Look and Feel but it still ++ has too many pending issues and artifacts and is not on par with GTK2 ++ support. This patch is the reverse of the upstream commit which ensures ++ that GTK2 is tried before GTK3. ++ ++ When GTK3 becomes better supported we should drop this and update the ++ dlopen_jre_depends variable in debian/rules accordingly. ++Origin: upstream, http://hg.openjdk.java.net/jdk-updates/jdk11u/rev/3e3696a308e1 ++Bug: https://bugs.openjdk.java.net/browse/JDK-8198649, ++ https://bugs.java.com/bugdatabase/view_bug.do?bug_id=JDK-8198654 ++Bug-Ubuntu: https://launchpad.net/bugs/1770278 ++Forwarded: not-needed ++Applied-Upstream: http//hg.openjdk.java.net/jdk-updates/jdk11u/rev/3e3696a308e1 ++Reviewed-by: ++Last-Update: 2019-03-27 ++--- ++This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ ++ ++# HG changeset patch ++# User prr ++# Date 1521484190 25200 ++# Node ID 3e3696a308e1674a1056d5ff0d660e7fb0d08286 ++# Parent 79f6a4dc221e6290723e773813969de422733545 ++8198649: Switch AWT/Swing's default GTK version to 3 ++Reviewed-by: psadhukhan, kaddepalli ++ ++--- a/src/java.desktop/unix/native/libawt_xawt/awt/gtk_interface.c +++++ b/src/java.desktop/unix/native/libawt_xawt/awt/gtk_interface.c ++@@ -50,18 +50,18 @@ ++ ++ static GtkLib gtk_libs[] = { ++ { ++- GTK_3, ++- JNI_LIB_NAME("gtk-3"), ++- VERSIONED_JNI_LIB_NAME("gtk-3", "0"), ++- >k3_load, ++- >k3_check ++- }, ++- { ++ GTK_2, ++ JNI_LIB_NAME("gtk-x11-2.0"), ++ VERSIONED_JNI_LIB_NAME("gtk-x11-2.0", "0"), ++ >k2_load, ++ >k2_check +++ }, +++ { +++ GTK_3, +++ JNI_LIB_NAME("gtk-3"), +++ VERSIONED_JNI_LIB_NAME("gtk-3", "0"), +++ >k3_load, +++ >k3_check ++ } ++ }; ++ diff --cc debian/patches/ldap-timeout-test-use-ip.patch index 000000000,000000000..fd61a86b3 new file mode 100644 --- /dev/null +++ b/debian/patches/ldap-timeout-test-use-ip.patch @@@ -1,0 -1,0 +1,19 @@@ ++Description: Replace hostname with a non-existant ip address ++ In Ubuntu build environment we do not have DNS resolver, causing an unexpected test ++ failure. Use a non-existant IP address instead of hostname. 
++Author: Vladimir Petko ++Forwarded: no ++Last-Update: 2023-03-27 ++--- a/test/jdk/com/sun/jndi/ldap/LdapPoolTimeoutTest.java +++++ b/test/jdk/com/sun/jndi/ldap/LdapPoolTimeoutTest.java ++@@ -86,7 +86,9 @@ ++ env.put("com.sun.jndi.ldap.read.timeout", String.valueOf(READ_MILLIS)); ++ env.put("com.sun.jndi.ldap.connect.timeout", String.valueOf(CONNECT_MILLIS)); ++ env.put("com.sun.jndi.ldap.connect.pool", "true"); ++- env.put(Context.PROVIDER_URL, "ldap://example.com:1234"); +++ // there should not be an LDAP provider on a multicast address, but +++ // the network stack should be able to send to it +++ env.put(Context.PROVIDER_URL, "ldap://224.0.0.0:1234"); ++ ++ try { ++ futures.add(executorService.submit(() -> { attemptConnect(env); return null; })); diff --cc debian/patches/libpcsclite-dlopen.diff index 000000000,000000000..f8fa94a3e new file mode 100644 --- /dev/null +++ b/debian/patches/libpcsclite-dlopen.diff @@@ -1,0 -1,0 +1,54 @@@ ++--- a/src/java.smartcardio/unix/classes/sun/security/smartcardio/PlatformPCSC.java +++++ b/src/java.smartcardio/unix/classes/sun/security/smartcardio/PlatformPCSC.java ++@@ -46,6 +46,7 @@ ++ ++ private final static String PROP_NAME = "sun.security.smartcardio.library"; ++ +++ private final static String LIB0 = "libpcsclite.so.1"; ++ private final static String LIB1 = "/usr/$LIBISA/libpcsclite.so"; ++ private final static String LIB2 = "/usr/local/$LIBISA/libpcsclite.so"; ++ private final static String PCSC_FRAMEWORK = "/System/Library/Frameworks/PCSC.framework/Versions/Current/PCSC"; ++@@ -98,40 +99,9 @@ ++ if (lib.length() != 0) { ++ return lib; ++ } ++- lib = expand(LIB1); ++- if (new File(lib).isFile()) { ++- // if LIB1 exists, use that ++- return lib; ++- } ++- lib = expand(LIB2); ++- if (new File(lib).isFile()) { ++- // if LIB2 exists, use that ++- return lib; ++- } ++- ++- // As of macos 11, framework libraries have been removed from the file ++- // system, but in such a way that they can still be dlopen()ed, even ++- // though they can no longer be open()ed. ++- // ++- // https://developer.apple.com/documentation/macos-release-notes/macos-big-sur-11_0_1-release-notes ++- // ++- // """New in macOS Big Sur 11.0.1, the system ships with a built-in ++- // dynamic linker cache of all system-provided libraries. As part of ++- // this change, copies of dynamic libraries are no longer present on ++- // the filesystem. Code that attempts to check for dynamic library ++- // presence by looking for a file at a path or enumerating a directory ++- // will fail. Instead, check for library presence by attempting to ++- // dlopen() the path, which will correctly check for the library in the ++- // cache.""" ++- // ++- // The directory structure remains otherwise intact, so check for ++- // existence of the containing directory instead of the file. ++- lib = PCSC_FRAMEWORK; ++- if (new File(lib).getParentFile().isDirectory()) { ++- // if PCSC.framework exists, use that ++- return lib; ++- } ++- throw new IOException("No PC/SC library found on this system"); +++ // let dlopen do the work +++ lib = LIB0; +++ return lib; ++ } ++ ++ private static native void initialize(String libraryName); diff --cc debian/patches/log-generated-classes-test.patch index 000000000,000000000..849de17b3 new file mode 100644 --- /dev/null +++ b/debian/patches/log-generated-classes-test.patch @@@ -1,0 -1,0 +1,39 @@@ ++Description: Skip the test when getFileStore() fails ++ Due to https://bugs.openjdk.org/browse/JDK-8166162 getFileStore() fails with an exception, ++ failing the test suite. 
Skip the test when getFileStore() throws. ++Author: Vladimir Petko ++Bug: https://bugs.openjdk.org/browse/JDK-8166162 ++Forwarded: not-needed ++Last-Update: 2023-03-27 ++--- a/test/jdk/java/lang/invoke/lambda/LogGeneratedClassesTest.java +++++ b/test/jdk/java/lang/invoke/lambda/LogGeneratedClassesTest.java ++@@ -195,14 +195,21 @@ ++ ++ @Test ++ public void testDumpDirNotWritable() throws IOException { ++- if (!Files.getFileStore(Paths.get(".")) ++- .supportsFileAttributeView(PosixFileAttributeView.class)) { ++- // No easy way to setup readonly directory without POSIX ++- // We would like to skip the test with a cause with ++- // throw new SkipException("Posix not supported"); ++- // but jtreg will report failure so we just pass the test ++- // which we can look at if jtreg changed its behavior ++- System.out.println("WARNING: POSIX is not supported. Skipping testDumpDirNotWritable test."); +++ try +++ { +++ if (!Files.getFileStore(Paths.get(".")) +++ .supportsFileAttributeView(PosixFileAttributeView.class)) { +++ // No easy way to setup readonly directory without POSIX +++ // We would like to skip the test with a cause with +++ // throw new SkipException("Posix not supported"); +++ // but jtreg will report failure so we just pass the test +++ // which we can look at if jtreg changed its behavior +++ System.out.println("WARNING: POSIX is not supported. Skipping testDumpDirNotWritable test."); +++ return; +++ } +++ } +++ catch (Throwable t) { +++ System.out.println("WARNING: Mount point not found (JDK-8166162). Skipping testDumpDirNotWritable test."); ++ return; ++ } ++ diff --cc debian/patches/m68k-support.diff index 000000000,000000000..3208fb6f3 new file mode 100644 --- /dev/null +++ b/debian/patches/m68k-support.diff @@@ -1,0 -1,0 +1,1100 @@@ ++Subject: Fix alignment issues on m68k ++Author: John Paul Adrian Glaubitz ++Last-Update: 2019-04-29 ++ ++--- /dev/null +++++ b/make/data/x11wrappergen/sizes-32-linux-m68k.txt ++@@ -0,0 +1,1017 @@ +++long 4 +++int 4 +++short 2 +++ptr 4 +++Bool 4 +++Atom 4 +++Window 4 +++XExtData.number 0 +++XExtData.next 4 +++XExtData.free_private 8 +++XExtData.private_data 12 +++XExtData 16 +++XIMStringConversionCallbackStruct.position 0 +++XIMStringConversionCallbackStruct.direction 2 +++XIMStringConversionCallbackStruct.operation 6 +++XIMStringConversionCallbackStruct.factor 8 +++XIMStringConversionCallbackStruct.text 10 +++XIMStringConversionCallbackStruct 14 +++XkbNewKeyboardNotifyEvent.type 0 +++XkbNewKeyboardNotifyEvent.serial 4 +++XkbNewKeyboardNotifyEvent.send_event 8 +++XkbNewKeyboardNotifyEvent.display 12 +++XkbNewKeyboardNotifyEvent.time 16 +++XkbNewKeyboardNotifyEvent.xkb_type 20 +++XkbNewKeyboardNotifyEvent.device 24 +++XkbNewKeyboardNotifyEvent.old_device 28 +++XkbNewKeyboardNotifyEvent.min_key_code 32 +++XkbNewKeyboardNotifyEvent.max_key_code 36 +++XkbNewKeyboardNotifyEvent.old_min_key_code 40 +++XkbNewKeyboardNotifyEvent.old_max_key_code 44 +++XkbNewKeyboardNotifyEvent.changed 48 +++XkbNewKeyboardNotifyEvent.req_major 52 +++XkbNewKeyboardNotifyEvent.req_minor 53 +++XkbNewKeyboardNotifyEvent 54 +++XTimeCoord.time 0 +++XTimeCoord.x 4 +++XTimeCoord.y 6 +++XTimeCoord 8 +++XkbCompatMapNotifyEvent.type 0 +++XkbCompatMapNotifyEvent.serial 4 +++XkbCompatMapNotifyEvent.send_event 8 +++XkbCompatMapNotifyEvent.display 12 +++XkbCompatMapNotifyEvent.time 16 +++XkbCompatMapNotifyEvent.xkb_type 20 +++XkbCompatMapNotifyEvent.device 24 +++XkbCompatMapNotifyEvent.changed_groups 28 +++XkbCompatMapNotifyEvent.first_si 32 +++XkbCompatMapNotifyEvent.num_si 36 
+++XkbCompatMapNotifyEvent.num_total_si 40 +++XkbCompatMapNotifyEvent 44 +++XIMStatusDrawCallbackStruct.type 0 +++XIMStatusDrawCallbackStruct.data 4 +++XIMStatusDrawCallbackStruct 8 +++XKeyboardControl.key_click_percent 0 +++XKeyboardControl.bell_percent 4 +++XKeyboardControl.bell_pitch 8 +++XKeyboardControl.bell_duration 12 +++XKeyboardControl.led 16 +++XKeyboardControl.led_mode 20 +++XKeyboardControl.key 24 +++XKeyboardControl.auto_repeat_mode 28 +++XKeyboardControl 32 +++XSelectionClearEvent.type 0 +++XSelectionClearEvent.serial 4 +++XSelectionClearEvent.send_event 8 +++XSelectionClearEvent.display 12 +++XSelectionClearEvent.window 16 +++XSelectionClearEvent.selection 20 +++XSelectionClearEvent.time 24 +++XSelectionClearEvent 28 +++XWindowChanges.x 0 +++XWindowChanges.y 4 +++XWindowChanges.width 8 +++XWindowChanges.height 12 +++XWindowChanges.border_width 16 +++XWindowChanges.sibling 20 +++XWindowChanges.stack_mode 24 +++XWindowChanges 28 +++XIMPreeditCaretCallbackStruct.position 0 +++XIMPreeditCaretCallbackStruct.direction 4 +++XIMPreeditCaretCallbackStruct.style 8 +++XIMPreeditCaretCallbackStruct 12 +++XOMCharSetList.charset_count 0 +++XOMCharSetList.charset_list 4 +++XOMCharSetList 8 +++XOMFontInfo.num_font 0 +++XOMFontInfo.font_struct_list 4 +++XOMFontInfo.font_name_list 8 +++XOMFontInfo 12 +++AwtScreenData.numConfigs 0 +++AwtScreenData.root 4 +++AwtScreenData.whitepixel 8 +++AwtScreenData.blackpixel 12 +++AwtScreenData.defaultConfig 16 +++AwtScreenData.configs 20 +++AwtScreenData 24 +++XIMHotKeyTrigger.keysym 0 +++XIMHotKeyTrigger.modifier 4 +++XIMHotKeyTrigger.modifier_mask 8 +++XIMHotKeyTrigger 12 +++XCirculateEvent.type 0 +++XCirculateEvent.serial 4 +++XCirculateEvent.send_event 8 +++XCirculateEvent.display 12 +++XCirculateEvent.event 16 +++XCirculateEvent.window 20 +++XCirculateEvent.place 24 +++XCirculateEvent 28 +++Screen.ext_data 0 +++Screen.display 4 +++Screen.root 8 +++Screen.width 12 +++Screen.height 16 +++Screen.mwidth 20 +++Screen.mheight 24 +++Screen.ndepths 28 +++Screen.depths 32 +++Screen.root_depth 36 +++Screen.root_visual 40 +++Screen.default_gc 44 +++Screen.cmap 48 +++Screen.white_pixel 52 +++Screen.black_pixel 56 +++Screen.max_maps 60 +++Screen.min_maps 64 +++Screen.backing_store 68 +++Screen.save_unders 72 +++Screen.root_input_mask 76 +++Screen 80 +++XMapRequestEvent.type 0 +++XMapRequestEvent.serial 4 +++XMapRequestEvent.send_event 8 +++XMapRequestEvent.display 12 +++XMapRequestEvent.parent 16 +++XMapRequestEvent.window 20 +++XMapRequestEvent 24 +++XIMText.length 0 +++XIMText.feedback 2 +++XIMText.encoding_is_wchar 6 +++XIMText.string 10 +++XIMText 14 +++XGraphicsExposeEvent.type 0 +++XGraphicsExposeEvent.serial 4 +++XGraphicsExposeEvent.send_event 8 +++XGraphicsExposeEvent.display 12 +++XGraphicsExposeEvent.drawable 16 +++XGraphicsExposeEvent.x 20 +++XGraphicsExposeEvent.y 24 +++XGraphicsExposeEvent.width 28 +++XGraphicsExposeEvent.height 32 +++XGraphicsExposeEvent.count 36 +++XGraphicsExposeEvent.major_code 40 +++XGraphicsExposeEvent.minor_code 44 +++XGraphicsExposeEvent 48 +++XEvent.type 0 +++XEvent.xany 0 +++XEvent.xkey 0 +++XEvent.xbutton 0 +++XEvent.xmotion 0 +++XEvent.xcrossing 0 +++XEvent.xfocus 0 +++XEvent.xexpose 0 +++XEvent.xgraphicsexpose 0 +++XEvent.xnoexpose 0 +++XEvent.xvisibility 0 +++XEvent.xcreatewindow 0 +++XEvent.xdestroywindow 0 +++XEvent.xunmap 0 +++XEvent.xmap 0 +++XEvent.xmaprequest 0 +++XEvent.xreparent 0 +++XEvent.xconfigure 0 +++XEvent.xgravity 0 +++XEvent.xresizerequest 0 +++XEvent.xconfigurerequest 0 +++XEvent.xcirculate 0 
+++XEvent.xcirculaterequest 0 +++XEvent.xproperty 0 +++XEvent.xselectionclear 0 +++XEvent.xselectionrequest 0 +++XEvent.xselection 0 +++XEvent.xcolormap 0 +++XEvent.xclient 0 +++XEvent.xmapping 0 +++XEvent.xerror 0 +++XEvent.xkeymap 0 +++XEvent.pad 0 +++XEvent 96 +++XRenderDirectFormat.red 0 +++XRenderDirectFormat.redMask 2 +++XRenderDirectFormat.green 4 +++XRenderDirectFormat.greenMask 6 +++XRenderDirectFormat.blue 8 +++XRenderDirectFormat.blueMask 10 +++XRenderDirectFormat.alpha 12 +++XRenderDirectFormat.alphaMask 14 +++XRenderDirectFormat 16 +++ColorData.awt_Colors 0 +++ColorData.awt_numICMcolors 4 +++ColorData.awt_icmLUT 8 +++ColorData.awt_icmLUT2Colors 12 +++ColorData.img_grays 16 +++ColorData.img_clr_tbl 20 +++ColorData.img_oda_red 24 +++ColorData.img_oda_green 28 +++ColorData.img_oda_blue 32 +++ColorData.pGrayInverseLutData 36 +++ColorData.representsPrimaries 44 +++ColorData.screendata 40 +++ColorData 48 +++XFontStruct.ext_data 0 +++XFontStruct.fid 4 +++XFontStruct.direction 8 +++XFontStruct.min_char_or_byte2 12 +++XFontStruct.max_char_or_byte2 16 +++XFontStruct.min_byte1 20 +++XFontStruct.max_byte1 24 +++XFontStruct.all_chars_exist 28 +++XFontStruct.n_properties 36 +++XFontStruct.properties 40 +++XFontStruct.min_bounds 44 +++XFontStruct.max_bounds 56 +++XFontStruct.per_char 68 +++XFontStruct.ascent 72 +++XFontStruct.descent 76 +++XFontStruct 80 +++XExtCodes.extension 0 +++XExtCodes.major_opcode 4 +++XExtCodes.first_event 8 +++XExtCodes.first_error 12 +++XExtCodes 16 +++XFontSetExtents.max_ink_extent 0 +++XFontSetExtents.max_logical_extent 8 +++XFontSetExtents 16 +++XSelectionEvent.type 0 +++XSelectionEvent.serial 4 +++XSelectionEvent.send_event 8 +++XSelectionEvent.display 12 +++XSelectionEvent.requestor 16 +++XSelectionEvent.selection 20 +++XSelectionEvent.target 24 +++XSelectionEvent.property 28 +++XSelectionEvent.time 32 +++XSelectionEvent 36 +++XArc.x 0 +++XArc.y 2 +++XArc.width 4 +++XArc.height 6 +++XArc.angle1 8 +++XArc.angle2 10 +++XArc 12 +++XErrorEvent.type 0 +++XErrorEvent.display 4 +++XErrorEvent.resourceid 8 +++XErrorEvent.serial 12 +++XErrorEvent.error_code 16 +++XErrorEvent.request_code 17 +++XErrorEvent.minor_code 18 +++XErrorEvent 20 +++XConfigureRequestEvent.type 0 +++XConfigureRequestEvent.serial 4 +++XConfigureRequestEvent.send_event 8 +++XConfigureRequestEvent.display 12 +++XConfigureRequestEvent.parent 16 +++XConfigureRequestEvent.window 20 +++XConfigureRequestEvent.x 24 +++XConfigureRequestEvent.y 28 +++XConfigureRequestEvent.width 32 +++XConfigureRequestEvent.height 36 +++XConfigureRequestEvent.border_width 40 +++XConfigureRequestEvent.above 44 +++XConfigureRequestEvent.detail 48 +++XConfigureRequestEvent.value_mask 52 +++XConfigureRequestEvent 56 +++ScreenFormat.ext_data 0 +++ScreenFormat.depth 4 +++ScreenFormat.bits_per_pixel 8 +++ScreenFormat.scanline_pad 12 +++ScreenFormat 16 +++XButtonEvent.type 0 +++XButtonEvent.serial 4 +++XButtonEvent.send_event 8 +++XButtonEvent.display 12 +++XButtonEvent.window 16 +++XButtonEvent.root 20 +++XButtonEvent.subwindow 24 +++XButtonEvent.time 28 +++XButtonEvent.x 32 +++XButtonEvent.y 36 +++XButtonEvent.x_root 40 +++XButtonEvent.y_root 44 +++XButtonEvent.state 48 +++XButtonEvent.button 52 +++XButtonEvent.same_screen 56 +++XButtonEvent 60 +++XFontProp.name 0 +++XFontProp.card32 4 +++XFontProp 8 +++XIMValuesList.count_values 0 +++XIMValuesList.supported_values 2 +++XIMValuesList 6 +++XKeymapEvent.type 0 +++XKeymapEvent.serial 4 +++XKeymapEvent.send_event 8 +++XKeymapEvent.display 12 +++XKeymapEvent.window 16 
+++XKeymapEvent.key_vector 20 +++XKeymapEvent 52 +++XTextItem16.chars 0 +++XTextItem16.nchars 4 +++XTextItem16.delta 8 +++XTextItem16.font 12 +++XTextItem16 16 +++XIMPreeditDrawCallbackStruct.caret 0 +++XIMPreeditDrawCallbackStruct.chg_first 4 +++XIMPreeditDrawCallbackStruct.chg_length 8 +++XIMPreeditDrawCallbackStruct.text 12 +++XIMPreeditDrawCallbackStruct 16 +++XVisualInfo.visual 0 +++XVisualInfo.visualid 4 +++XVisualInfo.screen 8 +++XVisualInfo.depth 12 +++XVisualInfo.class 16 +++XVisualInfo.red_mask 20 +++XVisualInfo.green_mask 24 +++XVisualInfo.blue_mask 28 +++XVisualInfo.colormap_size 32 +++XVisualInfo.bits_per_rgb 36 +++XVisualInfo 40 +++XkbControlsNotifyEvent.type 0 +++XkbControlsNotifyEvent.serial 4 +++XkbControlsNotifyEvent.send_event 8 +++XkbControlsNotifyEvent.display 12 +++XkbControlsNotifyEvent.time 16 +++XkbControlsNotifyEvent.xkb_type 20 +++XkbControlsNotifyEvent.device 24 +++XkbControlsNotifyEvent.changed_ctrls 28 +++XkbControlsNotifyEvent.enabled_ctrls 32 +++XkbControlsNotifyEvent.enabled_ctrl_changes 36 +++XkbControlsNotifyEvent.num_groups 40 +++XkbControlsNotifyEvent.keycode 44 +++XkbControlsNotifyEvent.event_type 45 +++XkbControlsNotifyEvent.req_major 46 +++XkbControlsNotifyEvent.req_minor 47 +++XkbControlsNotifyEvent 48 +++PropMwmHints.flags 0 +++PropMwmHints.functions 4 +++PropMwmHints.decorations 8 +++PropMwmHints.inputMode 12 +++PropMwmHints.status 16 +++PropMwmHints 20 +++XClientMessageEvent.type 0 +++XClientMessageEvent.serial 4 +++XClientMessageEvent.send_event 8 +++XClientMessageEvent.display 12 +++XClientMessageEvent.window 16 +++XClientMessageEvent.message_type 20 +++XClientMessageEvent.format 24 +++XClientMessageEvent.data 28 +++XClientMessageEvent 48 +++XAnyEvent.type 0 +++XAnyEvent.serial 4 +++XAnyEvent.send_event 8 +++XAnyEvent.display 12 +++XAnyEvent.window 16 +++XAnyEvent 20 +++XkbIndicatorNotifyEvent.type 0 +++XkbIndicatorNotifyEvent.serial 4 +++XkbIndicatorNotifyEvent.send_event 8 +++XkbIndicatorNotifyEvent.display 12 +++XkbIndicatorNotifyEvent.time 16 +++XkbIndicatorNotifyEvent.xkb_type 20 +++XkbIndicatorNotifyEvent.device 24 +++XkbIndicatorNotifyEvent.changed 28 +++XkbIndicatorNotifyEvent.state 32 +++XkbIndicatorNotifyEvent 36 +++XIMPreeditStateNotifyCallbackStruct.state 0 +++XIMPreeditStateNotifyCallbackStruct 4 +++XkbAnyEvent.type 0 +++XkbAnyEvent.serial 4 +++XkbAnyEvent.send_event 8 +++XkbAnyEvent.display 12 +++XkbAnyEvent.time 16 +++XkbAnyEvent.xkb_type 20 +++XkbAnyEvent.device 24 +++XkbAnyEvent 28 +++XMotionEvent.type 0 +++XMotionEvent.serial 4 +++XMotionEvent.send_event 8 +++XMotionEvent.display 12 +++XMotionEvent.window 16 +++XMotionEvent.root 20 +++XMotionEvent.subwindow 24 +++XMotionEvent.time 28 +++XMotionEvent.x 32 +++XMotionEvent.y 36 +++XMotionEvent.x_root 40 +++XMotionEvent.y_root 44 +++XMotionEvent.state 48 +++XMotionEvent.is_hint 52 +++XMotionEvent.same_screen 54 +++XMotionEvent 58 +++XIMHotKeyTriggers.num_hot_key 0 +++XIMHotKeyTriggers.key 4 +++XIMHotKeyTriggers 8 +++XIMStyles.count_styles 0 +++XIMStyles.supported_styles 2 +++XIMStyles 6 +++XkbExtensionDeviceNotifyEvent.type 0 +++XkbExtensionDeviceNotifyEvent.serial 4 +++XkbExtensionDeviceNotifyEvent.send_event 8 +++XkbExtensionDeviceNotifyEvent.display 12 +++XkbExtensionDeviceNotifyEvent.time 16 +++XkbExtensionDeviceNotifyEvent.xkb_type 20 +++XkbExtensionDeviceNotifyEvent.device 24 +++XkbExtensionDeviceNotifyEvent.reason 28 +++XkbExtensionDeviceNotifyEvent.supported 32 +++XkbExtensionDeviceNotifyEvent.unsupported 36 +++XkbExtensionDeviceNotifyEvent.first_btn 40 
+++XkbExtensionDeviceNotifyEvent.num_btns 44 +++XkbExtensionDeviceNotifyEvent.leds_defined 48 +++XkbExtensionDeviceNotifyEvent.led_state 52 +++XkbExtensionDeviceNotifyEvent.led_class 56 +++XkbExtensionDeviceNotifyEvent.led_id 60 +++XkbExtensionDeviceNotifyEvent 64 +++XwcTextItem.chars 0 +++XwcTextItem.nchars 4 +++XwcTextItem.delta 8 +++XwcTextItem.font_set 12 +++XwcTextItem 16 +++XClassHint.res_name 0 +++XClassHint.res_class 4 +++XClassHint 8 +++XChar2b.byte1 0 +++XChar2b.byte2 1 +++XChar2b 2 +++XSetWindowAttributes.background_pixmap 0 +++XSetWindowAttributes.background_pixel 4 +++XSetWindowAttributes.border_pixmap 8 +++XSetWindowAttributes.border_pixel 12 +++XSetWindowAttributes.bit_gravity 16 +++XSetWindowAttributes.win_gravity 20 +++XSetWindowAttributes.backing_store 24 +++XSetWindowAttributes.backing_planes 28 +++XSetWindowAttributes.backing_pixel 32 +++XSetWindowAttributes.save_under 36 +++XSetWindowAttributes.event_mask 40 +++XSetWindowAttributes.do_not_propagate_mask 44 +++XSetWindowAttributes.override_redirect 48 +++XSetWindowAttributes.colormap 52 +++XSetWindowAttributes.cursor 56 +++XSetWindowAttributes 60 +++XRenderPictFormat.id 0 +++XRenderPictFormat.type 4 +++XRenderPictFormat.depth 8 +++XRenderPictFormat.direct 12 +++XRenderPictFormat.colormap 28 +++XRenderPictFormat 32 +++XReparentEvent.type 0 +++XReparentEvent.serial 4 +++XReparentEvent.send_event 8 +++XReparentEvent.display 12 +++XReparentEvent.event 16 +++XReparentEvent.window 20 +++XReparentEvent.parent 24 +++XReparentEvent.x 28 +++XReparentEvent.y 32 +++XReparentEvent.override_redirect 36 +++XReparentEvent 40 +++XCirculateRequestEvent.type 0 +++XCirculateRequestEvent.serial 4 +++XCirculateRequestEvent.send_event 8 +++XCirculateRequestEvent.display 12 +++XCirculateRequestEvent.parent 16 +++XCirculateRequestEvent.window 20 +++XCirculateRequestEvent.place 24 +++XCirculateRequestEvent 28 +++XImage.width 0 +++XImage.height 4 +++XImage.xoffset 8 +++XImage.format 12 +++XImage.data 16 +++XImage.byte_order 20 +++XImage.bitmap_unit 24 +++XImage.bitmap_bit_order 28 +++XImage.bitmap_pad 32 +++XImage.depth 36 +++XImage.bytes_per_line 40 +++XImage.bits_per_pixel 44 +++XImage.red_mask 48 +++XImage.green_mask 52 +++XImage.blue_mask 56 +++XImage.obdata 60 +++XImage.f.create_image 64 +++XImage.f.destroy_image 68 +++XImage.f.get_pixel 72 +++XImage.f.put_pixel 76 +++XImage.f.sub_image 80 +++XImage.f.add_pixel 84 +++XImage 88 +++XKeyEvent.type 0 +++XKeyEvent.serial 4 +++XKeyEvent.send_event 8 +++XKeyEvent.display 12 +++XKeyEvent.window 16 +++XKeyEvent.root 20 +++XKeyEvent.subwindow 24 +++XKeyEvent.time 28 +++XKeyEvent.x 32 +++XKeyEvent.y 36 +++XKeyEvent.x_root 40 +++XKeyEvent.y_root 44 +++XKeyEvent.state 48 +++XKeyEvent.keycode 52 +++XKeyEvent.same_screen 56 +++XKeyEvent 60 +++XkbActionMessageEvent.type 0 +++XkbActionMessageEvent.serial 4 +++XkbActionMessageEvent.send_event 8 +++XkbActionMessageEvent.display 12 +++XkbActionMessageEvent.time 16 +++XkbActionMessageEvent.xkb_type 20 +++XkbActionMessageEvent.device 24 +++XkbActionMessageEvent.keycode 28 +++XkbActionMessageEvent.press 30 +++XkbActionMessageEvent.key_event_follows 34 +++XkbActionMessageEvent.group 38 +++XkbActionMessageEvent.mods 42 +++XkbActionMessageEvent.message 46 +++XkbActionMessageEvent 54 +++XdbeSwapInfo.swap_window 0 +++XdbeSwapInfo.swap_action 4 +++XdbeSwapInfo 6 +++XTextItem.chars 0 +++XTextItem.nchars 4 +++XTextItem.delta 8 +++XTextItem.font 12 +++XTextItem 16 +++XModifierKeymap.max_keypermod 0 +++XModifierKeymap.modifiermap 4 +++XModifierKeymap 8 
+++XCharStruct.lbearing 0 +++XCharStruct.rbearing 2 +++XCharStruct.width 4 +++XCharStruct.ascent 6 +++XCharStruct.descent 8 +++XCharStruct.attributes 10 +++XCharStruct 12 +++XGravityEvent.type 0 +++XGravityEvent.serial 4 +++XGravityEvent.send_event 8 +++XGravityEvent.display 12 +++XGravityEvent.event 16 +++XGravityEvent.window 20 +++XGravityEvent.x 24 +++XGravityEvent.y 28 +++XGravityEvent 32 +++Visual.ext_data 0 +++Visual.visualid 4 +++Visual.class 8 +++Visual.red_mask 12 +++Visual.green_mask 16 +++Visual.blue_mask 20 +++Visual.bits_per_rgb 24 +++Visual.map_entries 28 +++Visual 32 +++XOMOrientation.num_orientation 0 +++XOMOrientation.orientation 4 +++XOMOrientation 8 +++XkbAccessXNotifyEvent.type 0 +++XkbAccessXNotifyEvent.serial 4 +++XkbAccessXNotifyEvent.send_event 8 +++XkbAccessXNotifyEvent.display 12 +++XkbAccessXNotifyEvent.time 16 +++XkbAccessXNotifyEvent.xkb_type 20 +++XkbAccessXNotifyEvent.device 24 +++XkbAccessXNotifyEvent.detail 28 +++XkbAccessXNotifyEvent.keycode 32 +++XkbAccessXNotifyEvent.sk_delay 36 +++XkbAccessXNotifyEvent.debounce_delay 40 +++XkbAccessXNotifyEvent 44 +++XWindowAttributes.x 0 +++XWindowAttributes.y 4 +++XWindowAttributes.width 8 +++XWindowAttributes.height 12 +++XWindowAttributes.border_width 16 +++XWindowAttributes.depth 20 +++XWindowAttributes.visual 24 +++XWindowAttributes.root 28 +++XWindowAttributes.class 32 +++XWindowAttributes.bit_gravity 36 +++XWindowAttributes.win_gravity 40 +++XWindowAttributes.backing_store 44 +++XWindowAttributes.backing_planes 48 +++XWindowAttributes.backing_pixel 52 +++XWindowAttributes.save_under 56 +++XWindowAttributes.colormap 60 +++XWindowAttributes.map_installed 64 +++XWindowAttributes.map_state 68 +++XWindowAttributes.all_event_masks 72 +++XWindowAttributes.your_event_mask 76 +++XWindowAttributes.do_not_propagate_mask 80 +++XWindowAttributes.override_redirect 84 +++XWindowAttributes.screen 88 +++XWindowAttributes 92 +++XmbTextItem.chars 0 +++XmbTextItem.nchars 4 +++XmbTextItem.delta 8 +++XmbTextItem.font_set 12 +++XmbTextItem 16 +++XMappingEvent.type 0 +++XMappingEvent.serial 4 +++XMappingEvent.send_event 8 +++XMappingEvent.display 12 +++XMappingEvent.window 16 +++XMappingEvent.request 20 +++XMappingEvent.first_keycode 24 +++XMappingEvent.count 28 +++XMappingEvent 32 +++XSizeHints.flags 0 +++XSizeHints.x 4 +++XSizeHints.y 8 +++XSizeHints.width 12 +++XSizeHints.height 16 +++XSizeHints.min_width 20 +++XSizeHints.min_height 24 +++XSizeHints.max_width 28 +++XSizeHints.max_height 32 +++XSizeHints.width_inc 36 +++XSizeHints.height_inc 40 +++XSizeHints.min_aspect.x 44 +++XSizeHints.min_aspect.y 48 +++XSizeHints.max_aspect.x 52 +++XSizeHints.max_aspect.y 56 +++XSizeHints.base_width 60 +++XSizeHints.base_height 64 +++XSizeHints.win_gravity 68 +++XSizeHints 72 +++XUnmapEvent.type 0 +++XUnmapEvent.serial 4 +++XUnmapEvent.send_event 8 +++XUnmapEvent.display 12 +++XUnmapEvent.event 16 +++XUnmapEvent.window 20 +++XUnmapEvent.from_configure 24 +++XUnmapEvent 28 +++awtImageData.Depth 0 +++awtImageData.wsImageFormat 4 +++awtImageData.clrdata 16 +++awtImageData.convert 48 +++awtImageData 304 +++XkbStateNotifyEvent.type 0 +++XkbStateNotifyEvent.serial 4 +++XkbStateNotifyEvent.send_event 8 +++XkbStateNotifyEvent.display 12 +++XkbStateNotifyEvent.time 16 +++XkbStateNotifyEvent.xkb_type 20 +++XkbStateNotifyEvent.device 24 +++XkbStateNotifyEvent.changed 28 +++XkbStateNotifyEvent.group 32 +++XkbStateNotifyEvent.base_group 36 +++XkbStateNotifyEvent.latched_group 40 +++XkbStateNotifyEvent.locked_group 44 +++XkbStateNotifyEvent.mods 48 
+++XkbStateNotifyEvent.base_mods 52 +++XkbStateNotifyEvent.latched_mods 56 +++XkbStateNotifyEvent.locked_mods 60 +++XkbStateNotifyEvent.compat_state 64 +++XkbStateNotifyEvent.grab_mods 68 +++XkbStateNotifyEvent.compat_grab_mods 69 +++XkbStateNotifyEvent.lookup_mods 70 +++XkbStateNotifyEvent.compat_lookup_mods 71 +++XkbStateNotifyEvent.ptr_buttons 72 +++XkbStateNotifyEvent.keycode 76 +++XkbStateNotifyEvent.event_type 77 +++XkbStateNotifyEvent.req_major 78 +++XkbStateNotifyEvent.req_minor 79 +++XkbStateNotifyEvent 80 +++XExposeEvent.type 0 +++XExposeEvent.serial 4 +++XExposeEvent.send_event 8 +++XExposeEvent.display 12 +++XExposeEvent.window 16 +++XExposeEvent.x 20 +++XExposeEvent.y 24 +++XExposeEvent.width 28 +++XExposeEvent.height 32 +++XExposeEvent.count 36 +++XExposeEvent 40 +++XkbMapNotifyEvent.type 0 +++XkbMapNotifyEvent.serial 4 +++XkbMapNotifyEvent.send_event 8 +++XkbMapNotifyEvent.display 12 +++XkbMapNotifyEvent.time 16 +++XkbMapNotifyEvent.xkb_type 20 +++XkbMapNotifyEvent.device 24 +++XkbMapNotifyEvent.changed 28 +++XkbMapNotifyEvent.flags 32 +++XkbMapNotifyEvent.first_type 36 +++XkbMapNotifyEvent.num_types 40 +++XkbMapNotifyEvent.min_key_code 44 +++XkbMapNotifyEvent.max_key_code 45 +++XkbMapNotifyEvent.first_key_sym 46 +++XkbMapNotifyEvent.first_key_act 47 +++XkbMapNotifyEvent.first_key_behavior 48 +++XkbMapNotifyEvent.first_key_explicit 49 +++XkbMapNotifyEvent.first_modmap_key 50 +++XkbMapNotifyEvent.first_vmodmap_key 51 +++XkbMapNotifyEvent.num_key_syms 52 +++XkbMapNotifyEvent.num_key_acts 56 +++XkbMapNotifyEvent.num_key_behaviors 60 +++XkbMapNotifyEvent.num_key_explicit 64 +++XkbMapNotifyEvent.num_modmap_keys 68 +++XkbMapNotifyEvent.num_vmodmap_keys 72 +++XkbMapNotifyEvent.vmods 76 +++XkbMapNotifyEvent 80 +++XGCValues.function 0 +++XGCValues.plane_mask 4 +++XGCValues.foreground 8 +++XGCValues.background 12 +++XGCValues.line_width 16 +++XGCValues.line_style 20 +++XGCValues.cap_style 24 +++XGCValues.join_style 28 +++XGCValues.fill_style 32 +++XGCValues.fill_rule 36 +++XGCValues.arc_mode 40 +++XGCValues.tile 44 +++XGCValues.stipple 48 +++XGCValues.ts_x_origin 52 +++XGCValues.ts_y_origin 56 +++XGCValues.font 60 +++XGCValues.subwindow_mode 64 +++XGCValues.graphics_exposures 68 +++XGCValues.clip_x_origin 72 +++XGCValues.clip_y_origin 76 +++XGCValues.clip_mask 80 +++XGCValues.dash_offset 84 +++XGCValues.dashes 88 +++XGCValues 90 +++XFocusChangeEvent.type 0 +++XFocusChangeEvent.serial 4 +++XFocusChangeEvent.send_event 8 +++XFocusChangeEvent.display 12 +++XFocusChangeEvent.window 16 +++XFocusChangeEvent.mode 20 +++XFocusChangeEvent.detail 24 +++XFocusChangeEvent 28 +++XPixmapFormatValues.depth 0 +++XPixmapFormatValues.bits_per_pixel 4 +++XPixmapFormatValues.scanline_pad 8 +++XPixmapFormatValues 12 +++XMapEvent.type 0 +++XMapEvent.serial 4 +++XMapEvent.send_event 8 +++XMapEvent.display 12 +++XMapEvent.event 16 +++XMapEvent.window 20 +++XMapEvent.override_redirect 24 +++XMapEvent 28 +++XkbBellNotifyEvent.type 0 +++XkbBellNotifyEvent.serial 4 +++XkbBellNotifyEvent.send_event 8 +++XkbBellNotifyEvent.display 12 +++XkbBellNotifyEvent.time 16 +++XkbBellNotifyEvent.xkb_type 20 +++XkbBellNotifyEvent.device 24 +++XkbBellNotifyEvent.percent 28 +++XkbBellNotifyEvent.pitch 32 +++XkbBellNotifyEvent.duration 36 +++XkbBellNotifyEvent.bell_class 40 +++XkbBellNotifyEvent.bell_id 44 +++XkbBellNotifyEvent.name 48 +++XkbBellNotifyEvent.window 52 +++XkbBellNotifyEvent.event_only 56 +++XkbBellNotifyEvent 60 +++XIMStringConversionText.length 0 +++XIMStringConversionText.feedback 2 
+++XIMStringConversionText.encoding_is_wchar 6 +++XIMStringConversionText.string 10 +++XIMStringConversionText 14 +++XKeyboardState.key_click_percent 0 +++XKeyboardState.bell_percent 4 +++XKeyboardState.bell_pitch 8 +++XKeyboardState.bell_duration 12 +++XKeyboardState.led_mask 16 +++XKeyboardState.global_auto_repeat 20 +++XKeyboardState.auto_repeats 24 +++XKeyboardState 56 +++XkbEvent.type 0 +++XkbEvent.any 0 +++XkbEvent.new_kbd 0 +++XkbEvent.map 0 +++XkbEvent.state 0 +++XkbEvent.ctrls 0 +++XkbEvent.indicators 0 +++XkbEvent.names 0 +++XkbEvent.compat 0 +++XkbEvent.bell 0 +++XkbEvent.message 0 +++XkbEvent.accessx 0 +++XkbEvent.device 0 +++XkbEvent.core 0 +++XkbEvent 96 +++XPoint.x 0 +++XPoint.y 2 +++XPoint 4 +++XSegment.x1 0 +++XSegment.y1 2 +++XSegment.x2 4 +++XSegment.y2 6 +++XSegment 8 +++XIconSize.min_width 0 +++XIconSize.min_height 4 +++XIconSize.max_width 8 +++XIconSize.max_height 12 +++XIconSize.width_inc 16 +++XIconSize.height_inc 20 +++XIconSize 24 +++XIMCallback.client_data 0 +++XIMCallback.callback 4 +++XIMCallback 8 +++XConfigureEvent.type 0 +++XConfigureEvent.serial 4 +++XConfigureEvent.send_event 8 +++XConfigureEvent.display 12 +++XConfigureEvent.event 16 +++XConfigureEvent.window 20 +++XConfigureEvent.x 24 +++XConfigureEvent.y 28 +++XConfigureEvent.width 32 +++XConfigureEvent.height 36 +++XConfigureEvent.border_width 40 +++XConfigureEvent.above 44 +++XConfigureEvent.override_redirect 48 +++XConfigureEvent 52 +++XRectangle.x 0 +++XRectangle.y 2 +++XRectangle.width 4 +++XRectangle.height 6 +++XRectangle 8 +++XkbNamesNotifyEvent.type 0 +++XkbNamesNotifyEvent.serial 4 +++XkbNamesNotifyEvent.send_event 8 +++XkbNamesNotifyEvent.display 12 +++XkbNamesNotifyEvent.time 16 +++XkbNamesNotifyEvent.xkb_type 20 +++XkbNamesNotifyEvent.device 24 +++XkbNamesNotifyEvent.changed 28 +++XkbNamesNotifyEvent.first_type 32 +++XkbNamesNotifyEvent.num_types 36 +++XkbNamesNotifyEvent.first_lvl 40 +++XkbNamesNotifyEvent.num_lvls 44 +++XkbNamesNotifyEvent.num_aliases 48 +++XkbNamesNotifyEvent.num_radio_groups 52 +++XkbNamesNotifyEvent.changed_vmods 56 +++XkbNamesNotifyEvent.changed_groups 60 +++XkbNamesNotifyEvent.changed_indicators 64 +++XkbNamesNotifyEvent.first_key 68 +++XkbNamesNotifyEvent.num_keys 72 +++XkbNamesNotifyEvent 76 +++XCreateWindowEvent.type 0 +++XCreateWindowEvent.serial 4 +++XCreateWindowEvent.send_event 8 +++XCreateWindowEvent.display 12 +++XCreateWindowEvent.parent 16 +++XCreateWindowEvent.window 20 +++XCreateWindowEvent.x 24 +++XCreateWindowEvent.y 28 +++XCreateWindowEvent.width 32 +++XCreateWindowEvent.height 36 +++XCreateWindowEvent.border_width 40 +++XCreateWindowEvent.override_redirect 44 +++XCreateWindowEvent 48 +++XVisibilityEvent.type 0 +++XVisibilityEvent.serial 4 +++XVisibilityEvent.send_event 8 +++XVisibilityEvent.display 12 +++XVisibilityEvent.window 16 +++XVisibilityEvent.state 20 +++XVisibilityEvent 24 +++XWMHints.flags 0 +++XWMHints.initial_state 8 +++XWMHints.icon_pixmap 12 +++XWMHints.icon_window 16 +++XWMHints.icon_x 20 +++XWMHints.icon_y 24 +++XWMHints.icon_mask 28 +++XWMHints.input 4 +++XWMHints.window_group 32 +++XWMHints 36 +++XCrossingEvent.type 0 +++XCrossingEvent.serial 4 +++XCrossingEvent.send_event 8 +++XCrossingEvent.display 12 +++XCrossingEvent.window 16 +++XCrossingEvent.root 20 +++XCrossingEvent.subwindow 24 +++XCrossingEvent.time 28 +++XCrossingEvent.x 32 +++XCrossingEvent.y 36 +++XCrossingEvent.x_root 40 +++XCrossingEvent.y_root 44 +++XCrossingEvent.mode 48 +++XCrossingEvent.detail 52 +++XCrossingEvent.same_screen 56 +++XCrossingEvent.focus 60 
+++XCrossingEvent.state 64 +++XCrossingEvent 68 +++XSelectionRequestEvent.type 0 +++XSelectionRequestEvent.serial 4 +++XSelectionRequestEvent.send_event 8 +++XSelectionRequestEvent.display 12 +++XSelectionRequestEvent.owner 16 +++XSelectionRequestEvent.requestor 20 +++XSelectionRequestEvent.selection 24 +++XSelectionRequestEvent.target 28 +++XSelectionRequestEvent.property 32 +++XSelectionRequestEvent.time 36 +++XSelectionRequestEvent 40 +++XNoExposeEvent.type 0 +++XNoExposeEvent.serial 4 +++XNoExposeEvent.send_event 8 +++XNoExposeEvent.display 12 +++XNoExposeEvent.drawable 16 +++XNoExposeEvent.major_code 20 +++XNoExposeEvent.minor_code 24 +++XNoExposeEvent 28 +++XHostAddress.family 0 +++XHostAddress.length 4 +++XHostAddress.address 8 +++XHostAddress 12 +++XColormapEvent.type 0 +++XColormapEvent.serial 4 +++XColormapEvent.send_event 8 +++XColormapEvent.display 12 +++XColormapEvent.window 16 +++XColormapEvent.colormap 20 +++XColormapEvent.new 24 +++XColormapEvent.state 28 +++XColormapEvent 32 +++ColorEntry.r 0 +++ColorEntry.g 1 +++ColorEntry.b 2 +++ColorEntry.flags 3 +++ColorEntry 4 +++XResizeRequestEvent.type 0 +++XResizeRequestEvent.serial 4 +++XResizeRequestEvent.send_event 8 +++XResizeRequestEvent.display 12 +++XResizeRequestEvent.window 16 +++XResizeRequestEvent.width 20 +++XResizeRequestEvent.height 24 +++XResizeRequestEvent 28 +++Depth.depth 0 +++Depth.nvisuals 4 +++Depth.visuals 8 +++Depth 12 +++XPropertyEvent.type 0 +++XPropertyEvent.serial 4 +++XPropertyEvent.send_event 8 +++XPropertyEvent.display 12 +++XPropertyEvent.window 16 +++XPropertyEvent.atom 20 +++XPropertyEvent.time 24 +++XPropertyEvent.state 28 +++XPropertyEvent 32 +++XDestroyWindowEvent.type 0 +++XDestroyWindowEvent.serial 4 +++XDestroyWindowEvent.send_event 8 +++XDestroyWindowEvent.display 12 +++XDestroyWindowEvent.event 16 +++XDestroyWindowEvent.window 20 +++XDestroyWindowEvent 24 +++XStandardColormap.colormap 0 +++XStandardColormap.red_max 4 +++XStandardColormap.red_mult 8 +++XStandardColormap.green_max 12 +++XStandardColormap.green_mult 16 +++XStandardColormap.blue_max 20 +++XStandardColormap.blue_mult 24 +++XStandardColormap.base_pixel 28 +++XStandardColormap.visualid 32 +++XStandardColormap.killid 36 +++XStandardColormap 40 +++XComposeStatus.compose_ptr 0 +++XComposeStatus.chars_matched 4 +++XComposeStatus 8 +++AwtGraphicsConfigData.awt_depth 0 +++AwtGraphicsConfigData.awt_cmap 4 +++AwtGraphicsConfigData.awt_visInfo 8 +++AwtGraphicsConfigData.awt_num_colors 48 +++AwtGraphicsConfigData.awtImage 52 +++AwtGraphicsConfigData.AwtColorMatch 56 +++AwtGraphicsConfigData.monoImage 60 +++AwtGraphicsConfigData.monoPixmap 64 +++AwtGraphicsConfigData.monoPixmapWidth 68 +++AwtGraphicsConfigData.monoPixmapHeight 72 +++AwtGraphicsConfigData.monoPixmapGC 76 +++AwtGraphicsConfigData.pixelStride 80 +++AwtGraphicsConfigData.color_data 84 +++AwtGraphicsConfigData.glxInfo 88 +++AwtGraphicsConfigData.isTranslucencySupported 92 +++AwtGraphicsConfigData.renderPictFormat 96 +++AwtGraphicsConfigData 128 +++XColor.pixel 0 +++XColor.red 4 +++XColor.green 6 +++XColor.blue 8 +++XColor.flags 10 +++XColor.pad 11 +++XColor 12 +++XTextProperty.value 0 +++XTextProperty.encoding 4 +++XTextProperty.format 8 +++XTextProperty.nitems 12 +++XTextProperty 16 ++--- a/make/modules/java.desktop/gensrc/GensrcX11Wrappers.gmk +++++ b/make/modules/java.desktop/gensrc/GensrcX11Wrappers.gmk ++@@ -29,9 +29,14 @@ ++ # Put the generated Java classes used to interface X11 from awt here. 
++ GENSRC_X11WRAPPERS_OUTPUTDIR := $(SUPPORT_OUTPUTDIR)/gensrc/java.desktop/sun/awt/X11 ++ +++GENSRC_X11_VERSION_VARIANT := +++ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU), linux-m68k) +++ GENSRC_X11_VERSION_VARIANT := -linux-m68k +++endif +++ ++ # The pre-calculated offset file are stored here: ++ GENSRC_X11WRAPPERS_DATADIR := $(TOPDIR)/make/data/x11wrappergen ++-GENSRC_X11WRAPPERS_DATA := $(GENSRC_X11WRAPPERS_DATADIR)/sizes-$(OPENJDK_TARGET_CPU_BITS).txt +++GENSRC_X11WRAPPERS_DATA := $(GENSRC_X11WRAPPERS_DATADIR)/sizes-$(OPENJDK_TARGET_CPU_BITS)$(GENSRC_X11_VERSION_VARIANT).txt ++ ++ # Run the tool on the offset files to generate several Java classes used in awt. ++ $(eval $(call SetupExecute, gen_x11wrappers, \ ++--- a/src/hotspot/share/memory/allocation.hpp +++++ b/src/hotspot/share/memory/allocation.hpp ++@@ -239,7 +239,7 @@ ++ ++ void operator delete(void* p) { FreeHeap(p); } ++ void operator delete [] (void* p) { FreeHeap(p); } ++-}; +++} __attribute__ ((aligned (4))); ++ ++ // Base class for objects allocated on the stack only. ++ // Calling new or delete will result in fatal error. ++@@ -250,7 +250,7 @@ ++ void* operator new [](size_t size) throw(); ++ void operator delete(void* p); ++ void operator delete [](void* p); ++-}; +++} __attribute__ ((aligned (4))); ++ ++ // Base class for objects stored in Metaspace. ++ // Calling delete will result in fatal error. ++@@ -372,7 +372,7 @@ ++ // that should be read-only by default. See symbol.hpp for an example. This function ++ // is used by the templates in metaspaceClosure.hpp ++ static bool is_read_only_by_default() { return false; } ++-}; +++} __attribute__ ((aligned (4))); ++ ++ // Base class for classes that constitute name spaces. ++ ++@@ -457,7 +457,7 @@ ++ ++ void operator delete(void* p); ++ void operator delete [](void* p); ++-}; +++} __attribute__ ((aligned (4))); ++ ++ // One of the following macros must be used when allocating an array ++ // or object to determine whether it should reside in the C heap on in ++--- a/src/hotspot/share/oops/constMethod.hpp +++++ b/src/hotspot/share/oops/constMethod.hpp ++@@ -525,6 +525,6 @@ ++ ++ // Verify ++ void verify_on(outputStream* st); ++-}; +++} __attribute__ ((aligned (4))); ++ ++ #endif // SHARE_OOPS_CONSTMETHOD_HPP ++--- a/src/hotspot/share/oops/oop.hpp +++++ b/src/hotspot/share/oops/oop.hpp ++@@ -315,6 +315,6 @@ ++ // Avoid include gc_globals.hpp in oop.inline.hpp ++ DEBUG_ONLY(bool get_UseParallelGC();) ++ DEBUG_ONLY(bool get_UseG1GC();) ++-}; +++} __attribute__ ((aligned (4))); ++ ++ #endif // SHARE_OOPS_OOP_HPP diff --cc debian/patches/machine-flag.diff index 000000000,000000000..92c729dcd new file mode 100644 --- /dev/null +++ b/debian/patches/machine-flag.diff @@@ -1,0 -1,0 +1,24 @@@ ++--- a/make/autoconf/flags.m4 +++++ b/make/autoconf/flags.m4 ++@@ -263,6 +263,7 @@ ++ test "x$OPENJDK_TARGET_CPU_ARCH" = xppc; then ++ MACHINE_FLAG="-m${OPENJDK_TARGET_CPU_BITS}" ++ fi +++ case "$host" in *x32) MACHINE_FLAG=;; esac ++ fi ++ ++ if test "x$OPENJDK_TARGET_OS" = xmacosx; then ++@@ -344,11 +345,8 @@ ++ AC_SUBST(COMPILER_COMMAND_FILE_FLAG) ++ AC_SUBST(COMPILER_BINDCMD_FILE_FLAG) ++ ++- # Check that the compiler supports -mX (or -qX on AIX) flags ++- # Set COMPILER_SUPPORTS_TARGET_BITS_FLAG to 'true' if it does ++- FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${COMPILER_TARGET_BITS_FLAG}${OPENJDK_TARGET_CPU_BITS}], ++- IF_TRUE: [COMPILER_SUPPORTS_TARGET_BITS_FLAG=true], ++- IF_FALSE: [COMPILER_SUPPORTS_TARGET_BITS_FLAG=false]) +++ # Don't run the -mX check and set this always to 
false, or else x32 gets passed -m32 +++ COMPILER_SUPPORTS_TARGET_BITS_FLAG=false ++ AC_SUBST(COMPILER_SUPPORTS_TARGET_BITS_FLAG) ++ ++ if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then diff --cc debian/patches/make-debug-print.diff index 000000000,000000000..70dcfbf4a new file mode 100644 --- /dev/null +++ b/debian/patches/make-debug-print.diff @@@ -1,0 -1,0 +1,10 @@@ ++--- a/make/Main.gmk +++++ b/make/Main.gmk ++@@ -1196,6 +1196,7 @@ ++ ++ # Aliases used for running tests. ++ +++$(call PrintVar, ALL_NAMED_TESTS) ++ # Let "run-test" be an alias for "test" ++ $(foreach t, $(ALL_NAMED_TESTS), $(eval run-test-$t: test-$t)) ++ RUN_TEST_TARGETS := $(addprefix run-test-, $(ALL_NAMED_TESTS)) diff --cc debian/patches/mips.diff index 000000000,000000000..99f597d3d new file mode 100644 --- /dev/null +++ b/debian/patches/mips.diff @@@ -1,0 -1,0 +1,13 @@@ ++# DP: Untested patch suggested to fix #983878 ++ ++--- a/src/java.base/unix/native/libnio/fs/UnixNativeDispatcher.c +++++ b/src/java.base/unix/native/libnio/fs/UnixNativeDispatcher.c ++@@ -156,7 +156,7 @@ ++ /** ++ * fstatat missing from glibc on Linux. ++ */ ++-#if defined(__linux__) && (defined(__i386) || defined(__arm__)) +++#if defined(__linux__) && (defined(__i386) || defined(__arm__) || (defined(__mips__) && !defined(__mips64))) ++ #define FSTATAT64_SYSCALL_AVAILABLE ++ static int fstatat64_wrapper(int dfd, const char *path, ++ struct stat64 *statbuf, int flag) diff --cc debian/patches/misalign-pointer-for-armhf.patch index 000000000,000000000..911755490 new file mode 100644 --- /dev/null +++ b/debian/patches/misalign-pointer-for-armhf.patch @@@ -1,0 -1,0 +1,23 @@@ ++Description: Misalign the address in the test rather than the structure ++ m68k-support.diff aligned metaspace structures to a 4-byte boundary, breaking the test. ++ Add 1 to the address value rather than to the structure pointer, so the result is offset by one byte instead of by sizeof(MetaspaceObj) and is genuinely misaligned. ++--- a/test/hotspot/gtest/metaspace/test_is_metaspace_obj.cpp +++++ b/test/hotspot/gtest/metaspace/test_is_metaspace_obj.cpp ++@@ -61,7 +61,7 @@ ++ ASSERT_TRUE(MetaspaceObj::is_valid(p)); ++ ++ // A misaligned object shall not be recognized ++- const MetaspaceObj* p_misaligned = (MetaspaceObj*)((address)p) + 1; +++ const MetaspaceObj* p_misaligned = (MetaspaceObj*)((address)p + 1); ++ ASSERT_FALSE(MetaspaceObj::is_valid(p_misaligned)); ++ ++ // Test VirtualSpaceList::contains ++@@ -72,7 +72,7 @@ ++ ASSERT_TRUE(vslist->contains((MetaWord*)p)); ++ ++ // A misaligned pointer shall still be recognized by list::contains ++- ASSERT_TRUE(vslist->contains((MetaWord*)((address)p) + 1)); +++ ASSERT_TRUE(vslist->contains((MetaWord*)((address)p + 1))); ++ ++ // Now for some bogus values ++ ASSERT_FALSE(MetaspaceObj::is_valid((MetaspaceObj*)NULL)); diff --cc debian/patches/multiple-pkcs11-library-init.diff index 000000000,000000000..0238c4ffd new file mode 100644 --- /dev/null +++ b/debian/patches/multiple-pkcs11-library-init.diff @@@ -1,0 -1,0 +1,72 @@@ ++# HG changeset patch ++# User andrew ++# Date 1352129932 0 ++# Node ID e9c857dcb964dbfa5eef3a3590244cb4d999cf7a ++# Parent 1406789608b76d0906881979335d685855f44190 ++Allow multiple PKCS11 library initialisation to be a non-critical error. 
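The net effect of the change below is that a conflicting second NSS initialisation surfaces as UnsupportedOperationException instead of ProviderException, which the JDK's provider loader treats as "provider unavailable" rather than as a fatal error when the provider is registered statically in java.security. A minimal sketch of how the new mode would be used, assuming a hypothetical config path and NSS library directory (the handleStartupErrors attribute and the ignoreMultipleInitialisation value are taken from the Config.java hunk that follows):

    // /etc/java/nss.cfg (hypothetical path and contents):
    //   name = NSS
    //   nssLibraryDirectory = /usr/lib/x86_64-linux-gnu
    //   nssDbMode = noDb
    //   handleStartupErrors = ignoreMultipleInitialisation

    import java.security.Provider;
    import java.security.Security;

    public class LoadPkcs11 {
        public static void main(String[] args) {
            // Provider.configure() parses the SunPKCS11 config file; with
            // this patch, a pre-existing NSS initialisation elsewhere in the
            // process makes the provider unavailable instead of aborting.
            Provider base = Security.getProvider("SunPKCS11");
            Provider nss = base.configure("/etc/java/nss.cfg");
            Security.addProvider(nss);
            System.out.println("Loaded provider: " + nss.getName());
        }
    }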
++ ++--- a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/Config.java +++++ b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/Config.java ++@@ -52,6 +52,7 @@ ++ static final int ERR_HALT = 1; ++ static final int ERR_IGNORE_ALL = 2; ++ static final int ERR_IGNORE_LIB = 3; +++ static final int ERR_IGNORE_MULTI_INIT = 4; ++ ++ // same as allowSingleThreadedModules but controlled via a system property ++ // and applied to all providers. if set to false, no SunPKCS11 instances ++@@ -1023,6 +1024,8 @@ ++ handleStartupErrors = ERR_IGNORE_LIB; ++ } else if (val.equals("halt")) { ++ handleStartupErrors = ERR_HALT; +++ } else if (val.equals("ignoreMultipleInitialisation")) { +++ handleStartupErrors = ERR_IGNORE_MULTI_INIT; ++ } else { ++ throw excToken("Invalid value for handleStartupErrors:"); ++ } ++--- a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/SunPKCS11.java +++++ b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/SunPKCS11.java ++@@ -179,26 +179,37 @@ ++ String nssLibraryDirectory = config.getNssLibraryDirectory(); ++ String nssSecmodDirectory = config.getNssSecmodDirectory(); ++ boolean nssOptimizeSpace = config.getNssOptimizeSpace(); +++ int errorHandling = config.getHandleStartupErrors(); ++ ++ if (secmod.isInitialized()) { ++ if (nssSecmodDirectory != null) { ++ String s = secmod.getConfigDir(); ++ if ((s != null) && ++ (s.equals(nssSecmodDirectory) == false)) { ++- throw new ProviderException("Secmod directory " ++- + nssSecmodDirectory ++- + " invalid, NSS already initialized with " ++- + s); +++ String msg = "Secmod directory " + nssSecmodDirectory +++ + " invalid, NSS already initialized with " + s; +++ if (errorHandling == Config.ERR_IGNORE_MULTI_INIT || +++ errorHandling == Config.ERR_IGNORE_ALL) { +++ throw new UnsupportedOperationException(msg); +++ } else { +++ throw new ProviderException(msg); +++ } ++ } ++ } ++ if (nssLibraryDirectory != null) { ++ String s = secmod.getLibDir(); ++ if ((s != null) && ++ (s.equals(nssLibraryDirectory) == false)) { ++- throw new ProviderException("NSS library directory " +++ String msg = "NSS library directory " ++ + nssLibraryDirectory ++ + " invalid, NSS already initialized with " ++- + s); +++ + s; +++ if (errorHandling == Config.ERR_IGNORE_MULTI_INIT || +++ errorHandling == Config.ERR_IGNORE_ALL) { +++ throw new UnsupportedOperationException(msg); +++ } else { +++ throw new ProviderException(msg); +++ } ++ } ++ } ++ } else { diff --cc debian/patches/nspr+nss-headers.diff index 000000000,000000000..235bb6d3e new file mode 100644 --- /dev/null +++ b/debian/patches/nspr+nss-headers.diff @@@ -1,0 -1,0 +1,9090 @@@ ++Replace header files in src/jdk.crypto.cryptoki/share/native/libj2pkcs11. 
++ ++Import from NSPR 4.29: ++ - prcpucfg.h ++ - prtypes.h ++ ++Import from NSS 2.63: ++ - pkcs11.h ++ - pkcs11f.h ++ - pkcs11n.h ++ - pkcs11p.h ++ - pkcs11t.h ++ - pkcs11u.h ++ ++src/jdk.crypto.cryptoki/unix/native/libj2pkcs11/p11_md.h: ++ - Remove macro CK_PTR ++ - Remove macro CK_DEFINE_FUNCTION ++ - Remove macro CK_DECLARE_FUNCTION ++ - Remove macro CK_DECLARE_FUNCTION_POINTER ++ - Remove macro CK_CALLBACK_FUNCTION ++ ++src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11wrapper.h: ++ - use CK_TRUE/CK_FALSE instead of TRUE/FALSE ++ ++src/jdk.crypto.cryptoki/share/native/libj2pkcs11/p11_convert.c: ++ - FIXME: CKM_KEA_DERIVE is undefined ++ ++src/jdk.crypto.cryptoki/share/native/libj2pkcs11/p11_util.c: ++ - FIXME: CKM_CAMELLIA_CTR is undefined ++ ++ ++--- a/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11.h +++++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11.h ++@@ -1,10 +1,15 @@ ++-/* Copyright (c) OASIS Open 2016-2019. All Rights Reserved. ++- * Distributed under the terms of the OASIS IPR Policy, ++- * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY ++- * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A ++- * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. +++/* This Source Code Form is subject to the terms of the Mozilla Public +++ * License, v. 2.0. If a copy of the MPL was not distributed with this +++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +++/* +++ * Copyright (C) 1994-1999 RSA Security Inc. Licence to copy this document +++ * is granted provided that it is identified as "RSA Security In.c Public-Key +++ * Cryptography Standards (PKCS)" in all material mentioning or referencing +++ * this document. +++ * +++ * The latest version of this header can be found at: +++ * http://www.rsalabs.com/pkcs/pkcs-11/index.html ++ */ ++- ++ #ifndef _PKCS11_H_ ++ #define _PKCS11_H_ 1 ++ ++@@ -13,20 +18,21 @@ extern "C" { ++ #endif ++ ++ /* Before including this file (pkcs11.h) (or pkcs11t.h by ++- * itself), 5 platform-specific macros must be defined. These +++ * itself), 6 platform-specific macros must be defined. These ++ * macros are described below, and typical definitions for them ++ * are also given. Be advised that these definitions can depend ++ * on both the platform and the compiler used (and possibly also ++- * on whether a Cryptoki library is linked statically or +++ * on whether a PKCS #11 library is linked statically or ++ * dynamically). ++ * ++- * In addition to defining these 5 macros, the packing convention ++- * for Cryptoki structures should be set. The Cryptoki +++ * In addition to defining these 6 macros, the packing convention +++ * for PKCS #11 structures should be set. The PKCS #11 ++ * convention on packing is that structures should be 1-byte ++ * aligned. ++ * ++- * If you're using Windows this might be done by using the following ++- * preprocessor directive before including pkcs11.h or pkcs11t.h: +++ * In a Win32 environment, this might be done by using the +++ * following preprocessor directive before including pkcs11.h +++ * or pkcs11t.h: ++ * ++ * #pragma pack(push, cryptoki, 1) ++ * ++@@ -35,8 +41,8 @@ extern "C" { ++ * ++ * #pragma pack(pop, cryptoki) ++ * ++- * In a UNIX environment, you're on your own for this. You might ++- * not need to do (or be able to do!) anything. +++ * In a UNIX environment, you're on your own here. You might +++ * not need to do anything. 
++ * ++ * ++ * Now for the macros: ++@@ -47,17 +53,42 @@ extern "C" { ++ * ++ * typedef CK_BYTE CK_PTR CK_BYTE_PTR; ++ * ++- * If you're using windows, it might be defined by: +++ * In a Win32 environment, it might be defined by ++ * ++ * #define CK_PTR * ++ * ++- * In a typical UNIX environment, it might be defined by: +++ * In a UNIX environment, it might be defined by ++ * ++ * #define CK_PTR * ++ * ++ * ++- * 2. CK_DECLARE_FUNCTION(returnType, name): A macro which makes ++- * an importable Cryptoki library function declaration out of a +++ * 2. CK_DEFINE_FUNCTION(returnType, name): A macro which makes +++ * an exportable PKCS #11 library function definition out of a +++ * return type and a function name. It should be used in the +++ * following fashion to define the exposed PKCS #11 functions in +++ * a PKCS #11 library: +++ * +++ * CK_DEFINE_FUNCTION(CK_RV, C_Initialize)( +++ * CK_VOID_PTR pReserved +++ * ) +++ * { +++ * ... +++ * } +++ * +++ * For defining a function in a Win32 PKCS #11 .dll, it might be +++ * defined by +++ * +++ * #define CK_DEFINE_FUNCTION(returnType, name) \ +++ * returnType __declspec(dllexport) name +++ * +++ * In a UNIX environment, it might be defined by +++ * +++ * #define CK_DEFINE_FUNCTION(returnType, name) \ +++ * returnType name +++ * +++ * +++ * 3. CK_DECLARE_FUNCTION(returnType, name): A macro which makes +++ * an importable PKCS #11 library function declaration out of a ++ * return type and a function name. It should be used in the ++ * following fashion: ++ * ++@@ -65,49 +96,49 @@ extern "C" { ++ * CK_VOID_PTR pReserved ++ * ); ++ * ++- * If you're using Windows to declare a function in a Win32 cryptoki .dll, ++- * it might be defined by: +++ * For declaring a function in a Win32 PKCS #11 .dll, it might +++ * be defined by ++ * ++ * #define CK_DECLARE_FUNCTION(returnType, name) \ ++ * returnType __declspec(dllimport) name ++ * ++- * In a UNIX environment, it might be defined by: +++ * In a UNIX environment, it might be defined by ++ * ++ * #define CK_DECLARE_FUNCTION(returnType, name) \ ++ * returnType name ++ * ++ * ++- * 3. CK_DECLARE_FUNCTION_POINTER(returnType, name): A macro ++- * which makes a Cryptoki API function pointer declaration or +++ * 4. CK_DECLARE_FUNCTION_POINTER(returnType, name): A macro +++ * which makes a PKCS #11 API function pointer declaration or ++ * function pointer type declaration out of a return type and a ++ * function name. It should be used in the following fashion: ++ * ++- * // Define funcPtr to be a pointer to a Cryptoki API function +++ * // Define funcPtr to be a pointer to a PKCS #11 API function ++ * // taking arguments args and returning CK_RV. ++ * CK_DECLARE_FUNCTION_POINTER(CK_RV, funcPtr)(args); ++ * ++ * or ++ * ++ * // Define funcPtrType to be the type of a pointer to a ++- * // Cryptoki API function taking arguments args and returning +++ * // PKCS #11 API function taking arguments args and returning ++ * // CK_RV, and then define funcPtr to be a variable of type ++ * // funcPtrType. 
++ * typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, funcPtrType)(args); ++ * funcPtrType funcPtr; ++ * ++- * If you're using Windows to access ++- * functions in a Win32 Cryptoki .dll, in might be defined by: +++ * For accessing functions in a Win32 PKCS #11 .dll, in might be +++ * defined by ++ * ++ * #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \ ++ * returnType __declspec(dllimport) (* name) ++ * ++- * In a UNIX environment, it might be defined by: +++ * In a UNIX environment, it might be defined by ++ * ++ * #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \ ++ * returnType (* name) ++ * ++ * ++- * 4. CK_CALLBACK_FUNCTION(returnType, name): A macro which makes +++ * 5. CK_CALLBACK_FUNCTION(returnType, name): A macro which makes ++ * a function pointer type for an application callback out of ++ * a return type for the callback and a name for the callback. ++ * It should be used in the following fashion: ++@@ -121,77 +152,76 @@ extern "C" { ++ * typedef CK_CALLBACK_FUNCTION(CK_RV, myCallbackType)(args); ++ * myCallbackType myCallback; ++ * ++- * If you're using Windows, it might be defined by: +++ * In a Win32 environment, it might be defined by ++ * ++ * #define CK_CALLBACK_FUNCTION(returnType, name) \ ++ * returnType (* name) ++ * ++- * In a UNIX environment, it might be defined by: +++ * In a UNIX environment, it might be defined by ++ * ++ * #define CK_CALLBACK_FUNCTION(returnType, name) \ ++ * returnType (* name) ++ * ++ * ++- * 5. NULL_PTR: This macro is the value of a NULL pointer. +++ * 6. NULL_PTR: This macro is the value of a NULL pointer. ++ * ++ * In any ANSI/ISO C environment (and in many others as well), ++- * this should best be defined by +++ * this should be defined by ++ * ++ * #ifndef NULL_PTR ++ * #define NULL_PTR 0 ++ * #endif ++ */ ++ ++- ++-/* All the various Cryptoki types and #define'd values are in the ++- * file pkcs11t.h. ++- */ +++/* All the various PKCS #11 types and #define'd values are in the +++ * file pkcs11t.h. */ ++ #include "pkcs11t.h" ++ ++-#define __PASTE(x,y) x##y +++#define __PASTE(x, y) x##y ++ +++#ifndef CK_PKCS11_3_0 +++/* remember that we set it so we can unset it at the end */ +++#define __NSS_CK_PKCS11_3_IMPLICIT 1 +++#define CK_PKCS11_3_0 1 +++#endif ++ ++ /* ============================================================== ++ * Define the "extern" form of all the entry points. ++ * ============================================================== ++ */ ++ ++-#define CK_NEED_ARG_LIST 1 +++#define CK_NEED_ARG_LIST 1 ++ #define CK_PKCS11_FUNCTION_INFO(name) \ ++- extern CK_DECLARE_FUNCTION(CK_RV, name) +++ CK_DECLARE_FUNCTION(CK_RV, name) ++ ++-/* pkcs11f.h has all the information about the Cryptoki ++- * function prototypes. ++- */ +++/* pkcs11f.h has all the information about the PKCS #11 +++ * function prototypes. */ ++ #include "pkcs11f.h" ++ ++ #undef CK_NEED_ARG_LIST ++ #undef CK_PKCS11_FUNCTION_INFO ++ ++- ++ /* ============================================================== ++ * Define the typedef form of all the entry points. That is, for ++- * each Cryptoki function C_XXX, define a type CK_C_XXX which is +++ * each PKCS #11 function C_XXX, define a type CK_C_XXX which is ++ * a pointer to that kind of function. 
++ * ============================================================== ++ */ ++ ++-#define CK_NEED_ARG_LIST 1 +++#define CK_NEED_ARG_LIST 1 ++ #define CK_PKCS11_FUNCTION_INFO(name) \ ++- typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, __PASTE(CK_,name)) +++ typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, __PASTE(CK_, name)) ++ ++-/* pkcs11f.h has all the information about the Cryptoki ++- * function prototypes. ++- */ +++/* pkcs11f.h has all the information about the PKCS #11 +++ * function prototypes. */ ++ #include "pkcs11f.h" ++ ++ #undef CK_NEED_ARG_LIST ++ #undef CK_PKCS11_FUNCTION_INFO ++ ++- ++ /* ============================================================== ++- * Define structed vector of entry points. A CK_FUNCTION_LIST ++- * contains a CK_VERSION indicating a library's Cryptoki version +++ * Define structed vector of entry points. A CK_FUNCTION_3_0_LIST +++ * contains a CK_VERSION indicating a library's PKCS #11 version ++ * and then a whole slew of function pointers to the routines in ++ * the library. This type was declared, but not defined, in ++ * pkcs11t.h. ++@@ -199,39 +229,41 @@ extern "C" { ++ */ ++ ++ #define CK_PKCS11_FUNCTION_INFO(name) \ ++- __PASTE(CK_,name) name; +++ __PASTE(CK_, name) \ +++ name; ++ ++-/* Create the 3.0 Function list */ +++#include "pkcs11p.h" ++ struct CK_FUNCTION_LIST_3_0 { ++ ++- CK_VERSION version; /* Cryptoki version */ +++ CK_VERSION version; /* PKCS #11 version */ ++ ++-/* Pile all the function pointers into the CK_FUNCTION_LIST. */ ++-/* pkcs11f.h has all the information about the Cryptoki ++- * function prototypes. ++- */ +++/* Pile all the function pointers into the CK_FUNCTION_LIST_3_0. */ +++/* pkcs11f.h has all the information about the PKCS #11 +++ * function prototypes. */ ++ #include "pkcs11f.h" ++- ++ }; ++ ++ #define CK_PKCS11_2_0_ONLY 1 ++ ++-/* Continue to define the old CK_FUNCTION_LIST */ +++/* now define the 2.0 function list */ ++ struct CK_FUNCTION_LIST { ++ ++- CK_VERSION version; /* Cryptoki version */ +++ CK_VERSION version; /* PKCS #11 version */ ++ ++ /* Pile all the function pointers into the CK_FUNCTION_LIST. */ ++-/* pkcs11f.h has all the information about the Cryptoki ++- * function prototypes. ++- */ +++/* pkcs11f.h has all the information about the PKCS #11 +++ * function prototypes. */ ++ #include "pkcs11f.h" ++- ++ }; +++#include "pkcs11u.h" ++ ++ #undef CK_PKCS11_FUNCTION_INFO ++ #undef CK_PKCS11_2_0_ONLY ++ +++#ifdef __NSS_CK_PKCS11_3_IMPLICIT +++#undef CK_PKCS11_3_0 +++#undef __NSS_CK_PKCS11_3_IMPLICIT +++#endif ++ ++ #undef __PASTE ++ ++@@ -239,6 +271,4 @@ struct CK_FUNCTION_LIST { ++ } ++ #endif ++ ++-#endif /* _PKCS11_H_ */ ++- ++- +++#endif ++--- a/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11f.h +++++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11f.h ++@@ -1,1197 +1,1044 @@ ++-/* Copyright (c) OASIS Open 2016, 2019. All Rights Reserved./ ++- * /Distributed under the terms of the OASIS IPR Policy, ++- * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY ++- * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A ++- * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. ++- */ ++- ++-/* Latest version of the specification: ++- * http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html ++- */ ++- ++-/* This header file contains pretty much everything about all the ++- * Cryptoki function prototypes. 
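For orientation, the three passes over pkcs11f.h performed above expand CK_PKCS11_FUNCTION_INFO into three different artifacts. Taking C_Initialize as a worked example, the expansions come out roughly as:

    /* pass 1 -- plain declarations (CK_NEED_ARG_LIST set): */
    CK_DECLARE_FUNCTION(CK_RV, C_Initialize)(CK_VOID_PTR pInitArgs);

    /* pass 2 -- pointer typedefs via __PASTE(CK_, name): */
    typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, CK_C_Initialize)(CK_VOID_PTR pInitArgs);

    /* pass 3 -- members of CK_FUNCTION_LIST / CK_FUNCTION_LIST_3_0
     * (CK_NEED_ARG_LIST unset, so no argument list is emitted): */
    CK_C_Initialize C_Initialize;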
Because this information is ++- * used for more than just declaring function prototypes, the ++- * order of the functions appearing herein is important, and ++- * should not be altered. ++- */ +++/* This Source Code Form is subject to the terms of the Mozilla Public +++ * License, v. 2.0. If a copy of the MPL was not distributed with this +++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +++/* +++ * Copyright (C) 1994-1999 RSA Security Inc. Licence to copy this document +++ * is granted provided that it is identified as "RSA Security In.c Public-Key +++ * Cryptography Standards (PKCS)" in all material mentioning or referencing +++ * this document. +++ */ +++/* This function contains pretty much everything about all the */ +++/* PKCS #11 function prototypes. Because this information is */ +++/* used for more than just declaring function prototypes, the */ +++/* order of the functions appearing herein is important, and */ +++/* should not be altered. */ ++ ++ /* General-purpose */ ++ ++-/* C_Initialize initializes the Cryptoki library. */ +++/* C_Initialize initializes the PKCS #11 library. */ ++ CK_PKCS11_FUNCTION_INFO(C_Initialize) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_VOID_PTR pInitArgs /* if this is not NULL_PTR, it gets ++- * cast to CK_C_INITIALIZE_ARGS_PTR ++- * and dereferenced ++- */ ++-); +++ CK_VOID_PTR pInitArgs /* if this is not NULL_PTR, it gets +++ * cast to CK_C_INITIALIZE_ARGS_PTR +++ * and dereferenced */ +++ ); ++ #endif ++ ++- ++ /* C_Finalize indicates that an application is done with the ++- * Cryptoki library. ++- */ +++ * PKCS #11 library. */ ++ CK_PKCS11_FUNCTION_INFO(C_Finalize) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_VOID_PTR pReserved /* reserved. Should be NULL_PTR */ ++-); +++ CK_VOID_PTR pReserved /* reserved. Should be NULL_PTR */ +++ ); ++ #endif ++ ++- ++-/* C_GetInfo returns general information about Cryptoki. */ +++/* C_GetInfo returns general information about PKCS #11. */ ++ CK_PKCS11_FUNCTION_INFO(C_GetInfo) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_INFO_PTR pInfo /* location that receives information */ ++-); +++ CK_INFO_PTR pInfo /* location that receives information */ +++ ); ++ #endif ++ ++- ++ /* C_GetFunctionList returns the function list. */ ++ CK_PKCS11_FUNCTION_INFO(C_GetFunctionList) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_FUNCTION_LIST_PTR_PTR ppFunctionList /* receives pointer to ++- * function list ++- */ ++-); +++ CK_FUNCTION_LIST_PTR_PTR ppFunctionList /* receives pointer to +++ * function list */ +++ ); ++ #endif ++ ++- ++- ++ /* Slot and token management */ ++ ++ /* C_GetSlotList obtains a list of slots in the system. */ ++ CK_PKCS11_FUNCTION_INFO(C_GetSlotList) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_BBOOL tokenPresent, /* only slots with tokens */ ++- CK_SLOT_ID_PTR pSlotList, /* receives array of slot IDs */ ++- CK_ULONG_PTR pulCount /* receives number of slots */ ++-); +++ CK_BBOOL tokenPresent, /* only slots with tokens? */ +++ CK_SLOT_ID_PTR pSlotList, /* receives array of slot IDs */ +++ CK_ULONG_PTR pulCount /* receives number of slots */ +++ ); ++ #endif ++ ++- ++ /* C_GetSlotInfo obtains information about a particular slot in ++- * the system. ++- */ +++ * the system. 
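As a usage sketch: an application normally calls C_GetFunctionList once and then drives the module entirely through the returned vector, so only one symbol ever needs resolving. A minimal bootstrap, assuming the module is linked directly (with dlopen, the same pointer would come from dlsym(handle, "C_GetFunctionList")):

    #include "pkcs11.h"            /* after the platform prelude */

    static CK_FUNCTION_LIST_PTR p11;

    CK_RV bootstrap(void)
    {
        CK_RV rv = C_GetFunctionList(&p11);   /* fetch the function vector */
        if (rv != CKR_OK)
            return rv;
        return p11->C_Initialize(NULL_PTR);   /* default locking behaviour */
    }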
*/ ++ CK_PKCS11_FUNCTION_INFO(C_GetSlotInfo) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SLOT_ID slotID, /* the ID of the slot */ ++- CK_SLOT_INFO_PTR pInfo /* receives the slot information */ ++-); +++ CK_SLOT_ID slotID, /* the ID of the slot */ +++ CK_SLOT_INFO_PTR pInfo /* receives the slot information */ +++ ); ++ #endif ++ ++- ++ /* C_GetTokenInfo obtains information about a particular token ++- * in the system. ++- */ +++ * in the system. */ ++ CK_PKCS11_FUNCTION_INFO(C_GetTokenInfo) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SLOT_ID slotID, /* ID of the token's slot */ ++- CK_TOKEN_INFO_PTR pInfo /* receives the token information */ ++-); +++ CK_SLOT_ID slotID, /* ID of the token's slot */ +++ CK_TOKEN_INFO_PTR pInfo /* receives the token information */ +++ ); ++ #endif ++ ++- ++ /* C_GetMechanismList obtains a list of mechanism types ++- * supported by a token. ++- */ +++ * supported by a token. */ ++ CK_PKCS11_FUNCTION_INFO(C_GetMechanismList) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SLOT_ID slotID, /* ID of token's slot */ ++- CK_MECHANISM_TYPE_PTR pMechanismList, /* gets mech. array */ ++- CK_ULONG_PTR pulCount /* gets # of mechs. */ ++-); +++ CK_SLOT_ID slotID, /* ID of token's slot */ +++ CK_MECHANISM_TYPE_PTR pMechanismList, /* gets mech. array */ +++ CK_ULONG_PTR pulCount /* gets # of mechs. */ +++ ); ++ #endif ++ ++- ++ /* C_GetMechanismInfo obtains information about a particular ++- * mechanism possibly supported by a token. ++- */ +++ * mechanism possibly supported by a token. */ ++ CK_PKCS11_FUNCTION_INFO(C_GetMechanismInfo) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SLOT_ID slotID, /* ID of the token's slot */ ++- CK_MECHANISM_TYPE type, /* type of mechanism */ ++- CK_MECHANISM_INFO_PTR pInfo /* receives mechanism info */ ++-); +++ CK_SLOT_ID slotID, /* ID of the token's slot */ +++ CK_MECHANISM_TYPE type, /* type of mechanism */ +++ CK_MECHANISM_INFO_PTR pInfo /* receives mechanism info */ +++ ); ++ #endif ++ ++- ++ /* C_InitToken initializes a token. */ ++ CK_PKCS11_FUNCTION_INFO(C_InitToken) ++ #ifdef CK_NEED_ARG_LIST +++/* pLabel changed from CK_CHAR_PTR to CK_UTF8CHAR_PTR for v2.10 */ ++ ( ++- CK_SLOT_ID slotID, /* ID of the token's slot */ ++- CK_UTF8CHAR_PTR pPin, /* the SO's initial PIN */ ++- CK_ULONG ulPinLen, /* length in bytes of the PIN */ ++- CK_UTF8CHAR_PTR pLabel /* 32-byte token label (blank padded) */ ++-); +++ CK_SLOT_ID slotID, /* ID of the token's slot */ +++ CK_UTF8CHAR_PTR pPin, /* the SO's initial PIN */ +++ CK_ULONG ulPinLen, /* length in bytes of the PIN */ +++ CK_UTF8CHAR_PTR pLabel /* 32-byte token label (blank padded) */ +++ ); ++ #endif ++ ++- ++ /* C_InitPIN initializes the normal user's PIN. */ ++ CK_PKCS11_FUNCTION_INFO(C_InitPIN) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_UTF8CHAR_PTR pPin, /* the normal user's PIN */ ++- CK_ULONG ulPinLen /* length in bytes of the PIN */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_UTF8CHAR_PTR pPin, /* the normal user's PIN */ +++ CK_ULONG ulPinLen /* length in bytes of the PIN */ +++ ); ++ #endif ++ ++- ++ /* C_SetPIN modifies the PIN of the user who is logged in. 
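C_GetSlotList follows the standard two-call length convention: query the count with a NULL_PTR buffer, allocate, then fetch. An illustrative sketch:

    #include <stdlib.h>

    CK_RV list_slots(CK_FUNCTION_LIST_PTR p11)
    {
        CK_ULONG count = 0;
        CK_RV rv = p11->C_GetSlotList(CK_TRUE, NULL_PTR, &count); /* size only */
        if (rv != CKR_OK || count == 0)
            return rv;
        CK_SLOT_ID_PTR slots = malloc(count * sizeof(*slots));
        if (slots == NULL)
            return CKR_HOST_MEMORY;
        rv = p11->C_GetSlotList(CK_TRUE, slots, &count);          /* fill */
        /* ... inspect each slot with C_GetTokenInfo(slots[i], ...) ... */
        free(slots);
        return rv;
    }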
*/ ++ CK_PKCS11_FUNCTION_INFO(C_SetPIN) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_UTF8CHAR_PTR pOldPin, /* the old PIN */ ++- CK_ULONG ulOldLen, /* length of the old PIN */ ++- CK_UTF8CHAR_PTR pNewPin, /* the new PIN */ ++- CK_ULONG ulNewLen /* length of the new PIN */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_UTF8CHAR_PTR pOldPin, /* the old PIN */ +++ CK_ULONG ulOldLen, /* length of the old PIN */ +++ CK_UTF8CHAR_PTR pNewPin, /* the new PIN */ +++ CK_ULONG ulNewLen /* length of the new PIN */ +++ ); ++ #endif ++ ++- ++- ++ /* Session management */ ++ ++ /* C_OpenSession opens a session between an application and a ++- * token. ++- */ +++ * token. */ ++ CK_PKCS11_FUNCTION_INFO(C_OpenSession) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SLOT_ID slotID, /* the slot's ID */ ++- CK_FLAGS flags, /* from CK_SESSION_INFO */ ++- CK_VOID_PTR pApplication, /* passed to callback */ ++- CK_NOTIFY Notify, /* callback function */ ++- CK_SESSION_HANDLE_PTR phSession /* gets session handle */ ++-); +++ CK_SLOT_ID slotID, /* the slot's ID */ +++ CK_FLAGS flags, /* from CK_SESSION_INFO */ +++ CK_VOID_PTR pApplication, /* passed to callback */ +++ CK_NOTIFY Notify, /* callback function */ +++ CK_SESSION_HANDLE_PTR phSession /* gets session handle */ +++ ); ++ #endif ++ ++- ++ /* C_CloseSession closes a session between an application and a ++- * token. ++- */ +++ * token. */ ++ CK_PKCS11_FUNCTION_INFO(C_CloseSession) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession /* the session's handle */ ++-); +++ CK_SESSION_HANDLE hSession /* the session's handle */ +++ ); ++ #endif ++ ++- ++ /* C_CloseAllSessions closes all sessions with a token. */ ++ CK_PKCS11_FUNCTION_INFO(C_CloseAllSessions) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SLOT_ID slotID /* the token's slot */ ++-); +++ CK_SLOT_ID slotID /* the token's slot */ +++ ); ++ #endif ++ ++- ++ /* C_GetSessionInfo obtains information about the session. */ ++ CK_PKCS11_FUNCTION_INFO(C_GetSessionInfo) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_SESSION_INFO_PTR pInfo /* receives session info */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_SESSION_INFO_PTR pInfo /* receives session info */ +++ ); ++ #endif ++ ++- ++ /* C_GetOperationState obtains the state of the cryptographic operation ++- * in a session. ++- */ +++ * in a session. */ ++ CK_PKCS11_FUNCTION_INFO(C_GetOperationState) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session's handle */ ++- CK_BYTE_PTR pOperationState, /* gets state */ ++- CK_ULONG_PTR pulOperationStateLen /* gets state length */ ++-); +++ CK_SESSION_HANDLE hSession, /* session's handle */ +++ CK_BYTE_PTR pOperationState, /* gets state */ +++ CK_ULONG_PTR pulOperationStateLen /* gets state length */ +++ ); ++ #endif ++ ++- ++ /* C_SetOperationState restores the state of the cryptographic ++- * operation in a session. ++- */ +++ * operation in a session. 
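The session calls combine in a fixed open/use/close pattern. A sketch that opens a read/write session on an already-discovered slot (CKF_SERIAL_SESSION is always required; no notification callback is registered here):

    CK_RV open_rw_session(CK_FUNCTION_LIST_PTR p11, CK_SLOT_ID slot,
                          CK_SESSION_HANDLE_PTR phSession)
    {
        return p11->C_OpenSession(slot,
                                  CKF_SERIAL_SESSION | CKF_RW_SESSION,
                                  NULL_PTR,    /* pApplication */
                                  NULL_PTR,    /* Notify */
                                  phSession);
    }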
*/ ++ CK_PKCS11_FUNCTION_INFO(C_SetOperationState) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session's handle */ ++- CK_BYTE_PTR pOperationState, /* holds state */ ++- CK_ULONG ulOperationStateLen, /* holds state length */ ++- CK_OBJECT_HANDLE hEncryptionKey, /* en/decryption key */ ++- CK_OBJECT_HANDLE hAuthenticationKey /* sign/verify key */ ++-); +++ CK_SESSION_HANDLE hSession, /* session's handle */ +++ CK_BYTE_PTR pOperationState, /* holds state */ +++ CK_ULONG ulOperationStateLen, /* holds state length */ +++ CK_OBJECT_HANDLE hEncryptionKey, /* en/decryption key */ +++ CK_OBJECT_HANDLE hAuthenticationKey /* sign/verify key */ +++ ); ++ #endif ++ ++- ++ /* C_Login logs a user into a token. */ ++ CK_PKCS11_FUNCTION_INFO(C_Login) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_USER_TYPE userType, /* the user type */ ++- CK_UTF8CHAR_PTR pPin, /* the user's PIN */ ++- CK_ULONG ulPinLen /* the length of the PIN */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_USER_TYPE userType, /* the user type */ +++ CK_UTF8CHAR_PTR pPin, /* the user's PIN */ +++ CK_ULONG ulPinLen /* the length of the PIN */ +++ ); ++ #endif ++ ++- ++ /* C_Logout logs a user out from a token. */ ++ CK_PKCS11_FUNCTION_INFO(C_Logout) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession /* the session's handle */ ++-); +++ CK_SESSION_HANDLE hSession /* the session's handle */ +++ ); ++ #endif ++ ++- ++- ++ /* Object management */ ++ ++ /* C_CreateObject creates a new object. */ ++ CK_PKCS11_FUNCTION_INFO(C_CreateObject) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_ATTRIBUTE_PTR pTemplate, /* the object's template */ ++- CK_ULONG ulCount, /* attributes in template */ ++- CK_OBJECT_HANDLE_PTR phObject /* gets new object's handle. */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_ATTRIBUTE_PTR pTemplate, /* the object's template */ +++ CK_ULONG ulCount, /* attributes in template */ +++ CK_OBJECT_HANDLE_PTR phObject /* gets new object's handle. */ +++ ); ++ #endif ++ ++- ++ /* C_CopyObject copies an object, creating a new object for the ++- * copy. ++- */ +++ * copy. */ ++ CK_PKCS11_FUNCTION_INFO(C_CopyObject) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_OBJECT_HANDLE hObject, /* the object's handle */ ++- CK_ATTRIBUTE_PTR pTemplate, /* template for new object */ ++- CK_ULONG ulCount, /* attributes in template */ ++- CK_OBJECT_HANDLE_PTR phNewObject /* receives handle of copy */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_OBJECT_HANDLE hObject, /* the object's handle */ +++ CK_ATTRIBUTE_PTR pTemplate, /* template for new object */ +++ CK_ULONG ulCount, /* attributes in template */ +++ CK_OBJECT_HANDLE_PTR phNewObject /* receives handle of copy */ +++ ); ++ #endif ++ ++- ++ /* C_DestroyObject destroys an object. */ ++ CK_PKCS11_FUNCTION_INFO(C_DestroyObject) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_OBJECT_HANDLE hObject /* the object's handle */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_OBJECT_HANDLE hObject /* the object's handle */ +++ ); ++ #endif ++ ++- ++ /* C_GetObjectSize gets the size of an object in bytes. 
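C_Login followed by C_CreateObject is the canonical import path. A sketch that logs in as the normal user and stores a small data object (the attribute values are purely illustrative):

    CK_RV import_data(CK_FUNCTION_LIST_PTR p11, CK_SESSION_HANDLE hSession,
                      CK_UTF8CHAR_PTR pin, CK_ULONG pinLen)
    {
        CK_OBJECT_CLASS cls = CKO_DATA;
        CK_BYTE value[] = { 0x01, 0x02, 0x03 };
        CK_ATTRIBUTE tmpl[] = {
            { CKA_CLASS, &cls,  sizeof(cls) },
            { CKA_VALUE, value, sizeof(value) },
        };
        CK_OBJECT_HANDLE hObj;
        CK_RV rv = p11->C_Login(hSession, CKU_USER, pin, pinLen);
        if (rv != CKR_OK && rv != CKR_USER_ALREADY_LOGGED_IN)
            return rv;
        return p11->C_CreateObject(hSession, tmpl, 2, &hObj);
    }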
*/ ++ CK_PKCS11_FUNCTION_INFO(C_GetObjectSize) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_OBJECT_HANDLE hObject, /* the object's handle */ ++- CK_ULONG_PTR pulSize /* receives size of object */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_OBJECT_HANDLE hObject, /* the object's handle */ +++ CK_ULONG_PTR pulSize /* receives size of object */ +++ ); ++ #endif ++ ++- ++ /* C_GetAttributeValue obtains the value of one or more object ++- * attributes. ++- */ +++ * attributes. */ ++ CK_PKCS11_FUNCTION_INFO(C_GetAttributeValue) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_OBJECT_HANDLE hObject, /* the object's handle */ ++- CK_ATTRIBUTE_PTR pTemplate, /* specifies attrs; gets vals */ ++- CK_ULONG ulCount /* attributes in template */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_OBJECT_HANDLE hObject, /* the object's handle */ +++ CK_ATTRIBUTE_PTR pTemplate, /* specifies attrs; gets vals */ +++ CK_ULONG ulCount /* attributes in template */ +++ ); ++ #endif ++ ++- ++ /* C_SetAttributeValue modifies the value of one or more object ++- * attributes. ++- */ +++ * attributes */ ++ CK_PKCS11_FUNCTION_INFO(C_SetAttributeValue) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_OBJECT_HANDLE hObject, /* the object's handle */ ++- CK_ATTRIBUTE_PTR pTemplate, /* specifies attrs and values */ ++- CK_ULONG ulCount /* attributes in template */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_OBJECT_HANDLE hObject, /* the object's handle */ +++ CK_ATTRIBUTE_PTR pTemplate, /* specifies attrs and values */ +++ CK_ULONG ulCount /* attributes in template */ +++ ); ++ #endif ++ ++- ++ /* C_FindObjectsInit initializes a search for token and session ++- * objects that match a template. ++- */ +++ * objects that match a template. */ ++ CK_PKCS11_FUNCTION_INFO(C_FindObjectsInit) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_ATTRIBUTE_PTR pTemplate, /* attribute values to match */ ++- CK_ULONG ulCount /* attrs in search template */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_ATTRIBUTE_PTR pTemplate, /* attribute values to match */ +++ CK_ULONG ulCount /* attrs in search template */ +++ ); ++ #endif ++ ++- ++ /* C_FindObjects continues a search for token and session ++ * objects that match a template, obtaining additional object ++- * handles. ++- */ +++ * handles. */ ++ CK_PKCS11_FUNCTION_INFO(C_FindObjects) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session's handle */ ++- CK_OBJECT_HANDLE_PTR phObject, /* gets obj. handles */ ++- CK_ULONG ulMaxObjectCount, /* max handles to get */ ++- CK_ULONG_PTR pulObjectCount /* actual # returned */ ++-); +++ CK_SESSION_HANDLE hSession, /* session's handle */ +++ CK_OBJECT_HANDLE_PTR phObject, /* gets obj. handles */ +++ CK_ULONG ulMaxObjectCount, /* max handles to get */ +++ CK_ULONG_PTR pulObjectCount /* actual # returned */ +++ ); ++ #endif ++ ++- ++ /* C_FindObjectsFinal finishes a search for token and session ++- * objects. ++- */ +++ * objects. 
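C_FindObjectsInit, C_FindObjects and C_FindObjectsFinal form a mandatory bracket around every search. A sketch that pages through all certificate objects:

    CK_RV find_certs(CK_FUNCTION_LIST_PTR p11, CK_SESSION_HANDLE hSession)
    {
        CK_OBJECT_CLASS cls = CKO_CERTIFICATE;
        CK_ATTRIBUTE tmpl[] = { { CKA_CLASS, &cls, sizeof(cls) } };
        CK_OBJECT_HANDLE objs[16];
        CK_ULONG n = 0;
        CK_RV rv = p11->C_FindObjectsInit(hSession, tmpl, 1);
        if (rv != CKR_OK)
            return rv;
        do {
            rv = p11->C_FindObjects(hSession, objs, 16, &n);
            /* ... use objs[0 .. n-1] ... */
        } while (rv == CKR_OK && n == 16);
        p11->C_FindObjectsFinal(hSession);   /* always close the search */
        return rv;
    }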
*/ ++ CK_PKCS11_FUNCTION_INFO(C_FindObjectsFinal) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession /* the session's handle */ ++-); +++ CK_SESSION_HANDLE hSession /* the session's handle */ +++ ); ++ #endif ++ ++- ++- ++ /* Encryption and decryption */ ++ ++ /* C_EncryptInit initializes an encryption operation. */ ++ CK_PKCS11_FUNCTION_INFO(C_EncryptInit) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_MECHANISM_PTR pMechanism, /* the encryption mechanism */ ++- CK_OBJECT_HANDLE hKey /* handle of encryption key */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_MECHANISM_PTR pMechanism, /* the encryption mechanism */ +++ CK_OBJECT_HANDLE hKey /* handle of encryption key */ +++ ); ++ #endif ++ ++- ++ /* C_Encrypt encrypts single-part data. */ ++ CK_PKCS11_FUNCTION_INFO(C_Encrypt) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session's handle */ ++- CK_BYTE_PTR pData, /* the plaintext data */ ++- CK_ULONG ulDataLen, /* bytes of plaintext */ ++- CK_BYTE_PTR pEncryptedData, /* gets ciphertext */ ++- CK_ULONG_PTR pulEncryptedDataLen /* gets c-text size */ ++-); +++ CK_SESSION_HANDLE hSession, /* session's handle */ +++ CK_BYTE_PTR pData, /* the plaintext data */ +++ CK_ULONG ulDataLen, /* bytes of plaintext */ +++ CK_BYTE_PTR pEncryptedData, /* gets ciphertext */ +++ CK_ULONG_PTR pulEncryptedDataLen /* gets c-text size */ +++ ); ++ #endif ++ ++- ++ /* C_EncryptUpdate continues a multiple-part encryption ++- * operation. ++- */ +++ * operation. */ ++ CK_PKCS11_FUNCTION_INFO(C_EncryptUpdate) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session's handle */ ++- CK_BYTE_PTR pPart, /* the plaintext data */ ++- CK_ULONG ulPartLen, /* plaintext data len */ ++- CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ ++- CK_ULONG_PTR pulEncryptedPartLen /* gets c-text size */ ++-); +++ CK_SESSION_HANDLE hSession, /* session's handle */ +++ CK_BYTE_PTR pPart, /* the plaintext data */ +++ CK_ULONG ulPartLen, /* plaintext data len */ +++ CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ +++ CK_ULONG_PTR pulEncryptedPartLen /* gets c-text size */ +++ ); ++ #endif ++ ++- ++ /* C_EncryptFinal finishes a multiple-part encryption ++- * operation. ++- */ +++ * operation. */ ++ CK_PKCS11_FUNCTION_INFO(C_EncryptFinal) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session handle */ ++- CK_BYTE_PTR pLastEncryptedPart, /* last c-text */ ++- CK_ULONG_PTR pulLastEncryptedPartLen /* gets last size */ ++-); +++ CK_SESSION_HANDLE hSession, /* session handle */ +++ CK_BYTE_PTR pLastEncryptedPart, /* last c-text */ +++ CK_ULONG_PTR pulLastEncryptedPartLen /* gets last size */ +++ ); ++ #endif ++ ++- ++ /* C_DecryptInit initializes a decryption operation. */ ++ CK_PKCS11_FUNCTION_INFO(C_DecryptInit) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_MECHANISM_PTR pMechanism, /* the decryption mechanism */ ++- CK_OBJECT_HANDLE hKey /* handle of decryption key */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_MECHANISM_PTR pMechanism, /* the decryption mechanism */ +++ CK_OBJECT_HANDLE hKey /* handle of decryption key */ +++ ); ++ #endif ++ ++- ++ /* C_Decrypt decrypts encrypted data in a single part. 
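C_Encrypt also honours the two-call length convention (a NULL_PTR output buffer returns the required size through pulEncryptedDataLen). A single-part sketch, assuming an AES secret-key handle obtained elsewhere:

    CK_RV encrypt_once(CK_FUNCTION_LIST_PTR p11, CK_SESSION_HANDLE hSession,
                       CK_OBJECT_HANDLE hKey, CK_BYTE_PTR pt, CK_ULONG ptLen,
                       CK_BYTE_PTR ct, CK_ULONG_PTR pCtLen)
    {
        CK_BYTE iv[16] = { 0 };   /* illustrative IV only -- never reuse one */
        CK_MECHANISM mech = { CKM_AES_CBC_PAD, iv, sizeof(iv) };
        CK_RV rv = p11->C_EncryptInit(hSession, &mech, hKey);
        if (rv != CKR_OK)
            return rv;
        return p11->C_Encrypt(hSession, pt, ptLen, ct, pCtLen);
    }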
*/ ++ CK_PKCS11_FUNCTION_INFO(C_Decrypt) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session's handle */ ++- CK_BYTE_PTR pEncryptedData, /* ciphertext */ ++- CK_ULONG ulEncryptedDataLen, /* ciphertext length */ ++- CK_BYTE_PTR pData, /* gets plaintext */ ++- CK_ULONG_PTR pulDataLen /* gets p-text size */ ++-); +++ CK_SESSION_HANDLE hSession, /* session's handle */ +++ CK_BYTE_PTR pEncryptedData, /* ciphertext */ +++ CK_ULONG ulEncryptedDataLen, /* ciphertext length */ +++ CK_BYTE_PTR pData, /* gets plaintext */ +++ CK_ULONG_PTR pulDataLen /* gets p-text size */ +++ ); ++ #endif ++ ++- ++ /* C_DecryptUpdate continues a multiple-part decryption ++- * operation. ++- */ +++ * operation. */ ++ CK_PKCS11_FUNCTION_INFO(C_DecryptUpdate) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session's handle */ ++- CK_BYTE_PTR pEncryptedPart, /* encrypted data */ ++- CK_ULONG ulEncryptedPartLen, /* input length */ ++- CK_BYTE_PTR pPart, /* gets plaintext */ ++- CK_ULONG_PTR pulPartLen /* p-text size */ ++-); +++ CK_SESSION_HANDLE hSession, /* session's handle */ +++ CK_BYTE_PTR pEncryptedPart, /* encrypted data */ +++ CK_ULONG ulEncryptedPartLen, /* input length */ +++ CK_BYTE_PTR pPart, /* gets plaintext */ +++ CK_ULONG_PTR pulPartLen /* p-text size */ +++ ); ++ #endif ++ ++- ++ /* C_DecryptFinal finishes a multiple-part decryption ++- * operation. ++- */ +++ * operation. */ ++ CK_PKCS11_FUNCTION_INFO(C_DecryptFinal) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_BYTE_PTR pLastPart, /* gets plaintext */ ++- CK_ULONG_PTR pulLastPartLen /* p-text size */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_BYTE_PTR pLastPart, /* gets plaintext */ +++ CK_ULONG_PTR pulLastPartLen /* p-text size */ +++ ); ++ #endif ++ ++- ++- ++ /* Message digesting */ ++ ++ /* C_DigestInit initializes a message-digesting operation. */ ++ CK_PKCS11_FUNCTION_INFO(C_DigestInit) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_MECHANISM_PTR pMechanism /* the digesting mechanism */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_MECHANISM_PTR pMechanism /* the digesting mechanism */ +++ ); ++ #endif ++ ++- ++ /* C_Digest digests data in a single part. */ ++ CK_PKCS11_FUNCTION_INFO(C_Digest) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_BYTE_PTR pData, /* data to be digested */ ++- CK_ULONG ulDataLen, /* bytes of data to digest */ ++- CK_BYTE_PTR pDigest, /* gets the message digest */ ++- CK_ULONG_PTR pulDigestLen /* gets digest length */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_BYTE_PTR pData, /* data to be digested */ +++ CK_ULONG ulDataLen, /* bytes of data to digest */ +++ CK_BYTE_PTR pDigest, /* gets the message digest */ +++ CK_ULONG_PTR pulDigestLen /* gets digest length */ +++ ); ++ #endif ++ ++- ++ /* C_DigestUpdate continues a multiple-part message-digesting ++- * operation. ++- */ +++ * operation. 
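The Update/Final pairs let arbitrarily large inputs be streamed through a fixed buffer. A multi-part decryption sketch (real code must size the output according to the returned lengths):

    CK_RV decrypt_stream(CK_FUNCTION_LIST_PTR p11, CK_SESSION_HANDLE hSession,
                         CK_MECHANISM_PTR pMech, CK_OBJECT_HANDLE hKey,
                         CK_BYTE_PTR ct, CK_ULONG ctLen)
    {
        CK_BYTE out[4096];
        CK_ULONG outLen = sizeof(out);
        CK_RV rv = p11->C_DecryptInit(hSession, pMech, hKey);
        if (rv != CKR_OK)
            return rv;
        rv = p11->C_DecryptUpdate(hSession, ct, ctLen, out, &outLen);
        if (rv != CKR_OK)
            return rv;
        outLen = sizeof(out);
        return p11->C_DecryptFinal(hSession, out, &outLen); /* flush last part */
    }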
*/ ++ CK_PKCS11_FUNCTION_INFO(C_DigestUpdate) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_BYTE_PTR pPart, /* data to be digested */ ++- CK_ULONG ulPartLen /* bytes of data to be digested */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_BYTE_PTR pPart, /* data to be digested */ +++ CK_ULONG ulPartLen /* bytes of data to be digested */ +++ ); ++ #endif ++ ++- ++ /* C_DigestKey continues a multi-part message-digesting ++ * operation, by digesting the value of a secret key as part of ++- * the data already digested. ++- */ +++ * the data already digested. */ ++ CK_PKCS11_FUNCTION_INFO(C_DigestKey) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_OBJECT_HANDLE hKey /* secret key to digest */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_OBJECT_HANDLE hKey /* secret key to digest */ +++ ); ++ #endif ++ ++- ++ /* C_DigestFinal finishes a multiple-part message-digesting ++- * operation. ++- */ +++ * operation. */ ++ CK_PKCS11_FUNCTION_INFO(C_DigestFinal) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_BYTE_PTR pDigest, /* gets the message digest */ ++- CK_ULONG_PTR pulDigestLen /* gets byte count of digest */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_BYTE_PTR pDigest, /* gets the message digest */ +++ CK_ULONG_PTR pulDigestLen /* gets byte count of digest */ +++ ); ++ #endif ++ ++- ++- ++ /* Signing and MACing */ ++ ++ /* C_SignInit initializes a signature (private key encryption) ++ * operation, where the signature is (will be) an appendix to ++ * the data, and plaintext cannot be recovered from the ++- * signature. ++- */ +++ *signature. */ ++ CK_PKCS11_FUNCTION_INFO(C_SignInit) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_MECHANISM_PTR pMechanism, /* the signature mechanism */ ++- CK_OBJECT_HANDLE hKey /* handle of signature key */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_MECHANISM_PTR pMechanism, /* the signature mechanism */ +++ CK_OBJECT_HANDLE hKey /* handle of signature key */ +++ ); ++ #endif ++ ++- ++ /* C_Sign signs (encrypts with private key) data in a single ++ * part, where the signature is (will be) an appendix to the ++- * data, and plaintext cannot be recovered from the signature. ++- */ +++ * data, and plaintext cannot be recovered from the signature. */ ++ CK_PKCS11_FUNCTION_INFO(C_Sign) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_BYTE_PTR pData, /* the data to sign */ ++- CK_ULONG ulDataLen, /* count of bytes to sign */ ++- CK_BYTE_PTR pSignature, /* gets the signature */ ++- CK_ULONG_PTR pulSignatureLen /* gets signature length */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_BYTE_PTR pData, /* the data to sign */ +++ CK_ULONG ulDataLen, /* count of bytes to sign */ +++ CK_BYTE_PTR pSignature, /* gets the signature */ +++ CK_ULONG_PTR pulSignatureLen /* gets signature length */ +++ ); ++ #endif ++ ++- ++ /* C_SignUpdate continues a multiple-part signature operation, ++ * where the signature is (will be) an appendix to the data, ++- * and plaintext cannot be recovered from the signature. ++- */ +++ * and plaintext cannot be recovered from the signature. 
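A digest is computed by chaining the three calls above; any number of C_DigestUpdate calls may sit between Init and Final. A SHA-256 sketch:

    CK_RV digest_buf(CK_FUNCTION_LIST_PTR p11, CK_SESSION_HANDLE hSession,
                     CK_BYTE_PTR data, CK_ULONG len,
                     CK_BYTE_PTR out, CK_ULONG_PTR pOutLen)
    {
        CK_MECHANISM mech = { CKM_SHA256, NULL_PTR, 0 };
        CK_RV rv = p11->C_DigestInit(hSession, &mech);
        if (rv != CKR_OK)
            return rv;
        rv = p11->C_DigestUpdate(hSession, data, len);
        if (rv != CKR_OK)
            return rv;
        return p11->C_DigestFinal(hSession, out, pOutLen);
    }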
*/ ++ CK_PKCS11_FUNCTION_INFO(C_SignUpdate) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_BYTE_PTR pPart, /* the data to sign */ ++- CK_ULONG ulPartLen /* count of bytes to sign */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_BYTE_PTR pPart, /* the data to sign */ +++ CK_ULONG ulPartLen /* count of bytes to sign */ +++ ); ++ #endif ++ ++- ++ /* C_SignFinal finishes a multiple-part signature operation, ++- * returning the signature. ++- */ +++ * returning the signature. */ ++ CK_PKCS11_FUNCTION_INFO(C_SignFinal) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_BYTE_PTR pSignature, /* gets the signature */ ++- CK_ULONG_PTR pulSignatureLen /* gets signature length */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_BYTE_PTR pSignature, /* gets the signature */ +++ CK_ULONG_PTR pulSignatureLen /* gets signature length */ +++ ); ++ #endif ++ ++- ++ /* C_SignRecoverInit initializes a signature operation, where ++- * the data can be recovered from the signature. ++- */ +++ * the data can be recovered from the signature. */ ++ CK_PKCS11_FUNCTION_INFO(C_SignRecoverInit) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_MECHANISM_PTR pMechanism, /* the signature mechanism */ ++- CK_OBJECT_HANDLE hKey /* handle of the signature key */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_MECHANISM_PTR pMechanism, /* the signature mechanism */ +++ CK_OBJECT_HANDLE hKey /* handle of the signature key */ +++ ); ++ #endif ++ ++- ++ /* C_SignRecover signs data in a single operation, where the ++- * data can be recovered from the signature. ++- */ +++ * data can be recovered from the signature. */ ++ CK_PKCS11_FUNCTION_INFO(C_SignRecover) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_BYTE_PTR pData, /* the data to sign */ ++- CK_ULONG ulDataLen, /* count of bytes to sign */ ++- CK_BYTE_PTR pSignature, /* gets the signature */ ++- CK_ULONG_PTR pulSignatureLen /* gets signature length */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_BYTE_PTR pData, /* the data to sign */ +++ CK_ULONG ulDataLen, /* count of bytes to sign */ +++ CK_BYTE_PTR pSignature, /* gets the signature */ +++ CK_ULONG_PTR pulSignatureLen /* gets signature length */ +++ ); ++ #endif ++ ++- ++- ++ /* Verifying signatures and MACs */ ++ ++ /* C_VerifyInit initializes a verification operation, where the ++ * signature is an appendix to the data, and plaintext cannot ++- * cannot be recovered from the signature (e.g. DSA). ++- */ +++ * cannot be recovered from the signature (e.g. DSA). */ ++ CK_PKCS11_FUNCTION_INFO(C_VerifyInit) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_MECHANISM_PTR pMechanism, /* the verification mechanism */ ++- CK_OBJECT_HANDLE hKey /* verification key */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_MECHANISM_PTR pMechanism, /* the verification mechanism */ +++ CK_OBJECT_HANDLE hKey /* verification key */ +++ ); ++ #endif ++ ++- ++ /* C_Verify verifies a signature in a single-part operation, ++ * where the signature is an appendix to the data, and plaintext ++- * cannot be recovered from the signature. ++- */ +++ * cannot be recovered from the signature. 
*/ ++ CK_PKCS11_FUNCTION_INFO(C_Verify) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_BYTE_PTR pData, /* signed data */ ++- CK_ULONG ulDataLen, /* length of signed data */ ++- CK_BYTE_PTR pSignature, /* signature */ ++- CK_ULONG ulSignatureLen /* signature length*/ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_BYTE_PTR pData, /* signed data */ +++ CK_ULONG ulDataLen, /* length of signed data */ +++ CK_BYTE_PTR pSignature, /* signature */ +++ CK_ULONG ulSignatureLen /* signature length*/ +++ ); ++ #endif ++ ++- ++ /* C_VerifyUpdate continues a multiple-part verification ++ * operation, where the signature is an appendix to the data, ++- * and plaintext cannot be recovered from the signature. ++- */ +++ * and plaintext cannot be recovered from the signature. */ ++ CK_PKCS11_FUNCTION_INFO(C_VerifyUpdate) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_BYTE_PTR pPart, /* signed data */ ++- CK_ULONG ulPartLen /* length of signed data */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_BYTE_PTR pPart, /* signed data */ +++ CK_ULONG ulPartLen /* length of signed data */ +++ ); ++ #endif ++ ++- ++ /* C_VerifyFinal finishes a multiple-part verification ++- * operation, checking the signature. ++- */ +++ * operation, checking the signature. */ ++ CK_PKCS11_FUNCTION_INFO(C_VerifyFinal) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_BYTE_PTR pSignature, /* signature to verify */ ++- CK_ULONG ulSignatureLen /* signature length */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_BYTE_PTR pSignature, /* signature to verify */ +++ CK_ULONG ulSignatureLen /* signature length */ +++ ); ++ #endif ++ ++- ++ /* C_VerifyRecoverInit initializes a signature verification ++- * operation, where the data is recovered from the signature. ++- */ +++ * operation, where the data is recovered from the signature. */ ++ CK_PKCS11_FUNCTION_INFO(C_VerifyRecoverInit) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_MECHANISM_PTR pMechanism, /* the verification mechanism */ ++- CK_OBJECT_HANDLE hKey /* verification key */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_MECHANISM_PTR pMechanism, /* the verification mechanism */ +++ CK_OBJECT_HANDLE hKey /* verification key */ +++ ); ++ #endif ++ ++- ++ /* C_VerifyRecover verifies a signature in a single-part ++- * operation, where the data is recovered from the signature. ++- */ +++ * operation, where the data is recovered from the signature. */ ++ CK_PKCS11_FUNCTION_INFO(C_VerifyRecover) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_BYTE_PTR pSignature, /* signature to verify */ ++- CK_ULONG ulSignatureLen, /* signature length */ ++- CK_BYTE_PTR pData, /* gets signed data */ ++- CK_ULONG_PTR pulDataLen /* gets signed data len */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_BYTE_PTR pSignature, /* signature to verify */ +++ CK_ULONG ulSignatureLen, /* signature length */ +++ CK_BYTE_PTR pData, /* gets signed data */ +++ CK_ULONG_PTR pulDataLen /* gets signed data len */ +++ ); ++ #endif ++ ++- ++- ++ /* Dual-function cryptographic operations */ ++ ++ /* C_DigestEncryptUpdate continues a multiple-part digesting ++- * and encryption operation. ++- */ +++ * and encryption operation. 
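Sign and verify are deliberately symmetric: the verifier replays the same mechanism and gets CKR_SIGNATURE_INVALID on mismatch rather than producing output. A round-trip sketch with an assumed RSA key pair:

    CK_RV sign_then_verify(CK_FUNCTION_LIST_PTR p11, CK_SESSION_HANDLE hSession,
                           CK_OBJECT_HANDLE hPriv, CK_OBJECT_HANDLE hPub,
                           CK_BYTE_PTR data, CK_ULONG len)
    {
        CK_MECHANISM mech = { CKM_SHA256_RSA_PKCS, NULL_PTR, 0 };
        CK_BYTE sig[512];
        CK_ULONG sigLen = sizeof(sig);
        CK_RV rv = p11->C_SignInit(hSession, &mech, hPriv);
        if (rv == CKR_OK)
            rv = p11->C_Sign(hSession, data, len, sig, &sigLen);
        if (rv == CKR_OK)
            rv = p11->C_VerifyInit(hSession, &mech, hPub);
        if (rv == CKR_OK)
            rv = p11->C_Verify(hSession, data, len, sig, sigLen);
        return rv;
    }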
*/ ++ CK_PKCS11_FUNCTION_INFO(C_DigestEncryptUpdate) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session's handle */ ++- CK_BYTE_PTR pPart, /* the plaintext data */ ++- CK_ULONG ulPartLen, /* plaintext length */ ++- CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ ++- CK_ULONG_PTR pulEncryptedPartLen /* gets c-text length */ ++-); +++ CK_SESSION_HANDLE hSession, /* session's handle */ +++ CK_BYTE_PTR pPart, /* the plaintext data */ +++ CK_ULONG ulPartLen, /* plaintext length */ +++ CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ +++ CK_ULONG_PTR pulEncryptedPartLen /* gets c-text length */ +++ ); ++ #endif ++ ++- ++ /* C_DecryptDigestUpdate continues a multiple-part decryption and ++- * digesting operation. ++- */ +++ * digesting operation. */ ++ CK_PKCS11_FUNCTION_INFO(C_DecryptDigestUpdate) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session's handle */ ++- CK_BYTE_PTR pEncryptedPart, /* ciphertext */ ++- CK_ULONG ulEncryptedPartLen, /* ciphertext length */ ++- CK_BYTE_PTR pPart, /* gets plaintext */ ++- CK_ULONG_PTR pulPartLen /* gets plaintext len */ ++-); +++ CK_SESSION_HANDLE hSession, /* session's handle */ +++ CK_BYTE_PTR pEncryptedPart, /* ciphertext */ +++ CK_ULONG ulEncryptedPartLen, /* ciphertext length */ +++ CK_BYTE_PTR pPart, /* gets plaintext */ +++ CK_ULONG_PTR pulPartLen /* gets plaintext len */ +++ ); ++ #endif ++ ++- ++ /* C_SignEncryptUpdate continues a multiple-part signing and ++- * encryption operation. ++- */ +++ * encryption operation. */ ++ CK_PKCS11_FUNCTION_INFO(C_SignEncryptUpdate) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session's handle */ ++- CK_BYTE_PTR pPart, /* the plaintext data */ ++- CK_ULONG ulPartLen, /* plaintext length */ ++- CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ ++- CK_ULONG_PTR pulEncryptedPartLen /* gets c-text length */ ++-); +++ CK_SESSION_HANDLE hSession, /* session's handle */ +++ CK_BYTE_PTR pPart, /* the plaintext data */ +++ CK_ULONG ulPartLen, /* plaintext length */ +++ CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ +++ CK_ULONG_PTR pulEncryptedPartLen /* gets c-text length */ +++ ); ++ #endif ++ ++- ++ /* C_DecryptVerifyUpdate continues a multiple-part decryption and ++- * verify operation. ++- */ +++ * verify operation. */ ++ CK_PKCS11_FUNCTION_INFO(C_DecryptVerifyUpdate) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session's handle */ ++- CK_BYTE_PTR pEncryptedPart, /* ciphertext */ ++- CK_ULONG ulEncryptedPartLen, /* ciphertext length */ ++- CK_BYTE_PTR pPart, /* gets plaintext */ ++- CK_ULONG_PTR pulPartLen /* gets p-text length */ ++-); +++ CK_SESSION_HANDLE hSession, /* session's handle */ +++ CK_BYTE_PTR pEncryptedPart, /* ciphertext */ +++ CK_ULONG ulEncryptedPartLen, /* ciphertext length */ +++ CK_BYTE_PTR pPart, /* gets plaintext */ +++ CK_ULONG_PTR pulPartLen /* gets p-text length */ +++ ); ++ #endif ++ ++- ++- ++ /* Key management */ ++ ++ /* C_GenerateKey generates a secret key, creating a new key ++- * object. ++- */ +++ * object. */ ++ CK_PKCS11_FUNCTION_INFO(C_GenerateKey) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_MECHANISM_PTR pMechanism, /* key generation mech. */ ++- CK_ATTRIBUTE_PTR pTemplate, /* template for new key */ ++- CK_ULONG ulCount, /* # of attrs in template */ ++- CK_OBJECT_HANDLE_PTR phKey /* gets handle of new key */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_MECHANISM_PTR pMechanism, /* key generation mech. 
*/ +++ CK_ATTRIBUTE_PTR pTemplate, /* template for new key */ +++ CK_ULONG ulCount, /* # of attrs in template */ +++ CK_OBJECT_HANDLE_PTR phKey /* gets handle of new key */ +++ ); ++ #endif ++ ++- ++ /* C_GenerateKeyPair generates a public-key/private-key pair, ++- * creating new key objects. ++- */ +++ * creating new key objects. */ ++ CK_PKCS11_FUNCTION_INFO(C_GenerateKeyPair) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session handle */ ++- CK_MECHANISM_PTR pMechanism, /* key-gen mech. */ ++- CK_ATTRIBUTE_PTR pPublicKeyTemplate, /* template for pub. key */ ++- CK_ULONG ulPublicKeyAttributeCount, /* # pub. attrs. */ ++- CK_ATTRIBUTE_PTR pPrivateKeyTemplate, /* template for priv. key */ ++- CK_ULONG ulPrivateKeyAttributeCount, /* # priv. attrs. */ ++- CK_OBJECT_HANDLE_PTR phPublicKey, /* gets pub. key handle */ ++- CK_OBJECT_HANDLE_PTR phPrivateKey /* gets priv. key handle */ ++-); +++ CK_SESSION_HANDLE hSession, /* session handle */ +++ CK_MECHANISM_PTR pMechanism, /* key-gen mech. */ +++ CK_ATTRIBUTE_PTR pPublicKeyTemplate, /* template for pub. key */ +++ CK_ULONG ulPublicKeyAttributeCount, /* # pub. attrs. */ +++ CK_ATTRIBUTE_PTR pPrivateKeyTemplate, /* template for priv. key */ +++ CK_ULONG ulPrivateKeyAttributeCount, /* # priv. attrs. */ +++ CK_OBJECT_HANDLE_PTR phPublicKey, /* gets pub. key handle */ +++ CK_OBJECT_HANDLE_PTR phPrivateKey /* gets priv. key handle */ +++ ); ++ #endif ++ ++- ++ /* C_WrapKey wraps (i.e., encrypts) a key. */ ++ CK_PKCS11_FUNCTION_INFO(C_WrapKey) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_MECHANISM_PTR pMechanism, /* the wrapping mechanism */ ++- CK_OBJECT_HANDLE hWrappingKey, /* wrapping key */ ++- CK_OBJECT_HANDLE hKey, /* key to be wrapped */ ++- CK_BYTE_PTR pWrappedKey, /* gets wrapped key */ ++- CK_ULONG_PTR pulWrappedKeyLen /* gets wrapped key size */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_MECHANISM_PTR pMechanism, /* the wrapping mechanism */ +++ CK_OBJECT_HANDLE hWrappingKey, /* wrapping key */ +++ CK_OBJECT_HANDLE hKey, /* key to be wrapped */ +++ CK_BYTE_PTR pWrappedKey, /* gets wrapped key */ +++ CK_ULONG_PTR pulWrappedKeyLen /* gets wrapped key size */ +++ ); ++ #endif ++ ++- ++ /* C_UnwrapKey unwraps (decrypts) a wrapped key, creating a new ++- * key object. ++- */ +++ * key object. */ ++ CK_PKCS11_FUNCTION_INFO(C_UnwrapKey) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session's handle */ ++- CK_MECHANISM_PTR pMechanism, /* unwrapping mech. */ ++- CK_OBJECT_HANDLE hUnwrappingKey, /* unwrapping key */ ++- CK_BYTE_PTR pWrappedKey, /* the wrapped key */ ++- CK_ULONG ulWrappedKeyLen, /* wrapped key len */ ++- CK_ATTRIBUTE_PTR pTemplate, /* new key template */ ++- CK_ULONG ulAttributeCount, /* template length */ ++- CK_OBJECT_HANDLE_PTR phKey /* gets new handle */ ++-); +++ CK_SESSION_HANDLE hSession, /* session's handle */ +++ CK_MECHANISM_PTR pMechanism, /* unwrapping mech. */ +++ CK_OBJECT_HANDLE hUnwrappingKey, /* unwrapping key */ +++ CK_BYTE_PTR pWrappedKey, /* the wrapped key */ +++ CK_ULONG ulWrappedKeyLen, /* wrapped key len */ +++ CK_ATTRIBUTE_PTR pTemplate, /* new key template */ +++ CK_ULONG ulAttributeCount, /* template length */ +++ CK_OBJECT_HANDLE_PTR phKey /* gets new handle */ +++ ); ++ #endif ++ ++- ++ /* C_DeriveKey derives a key from a base key, creating a new key ++- * object. ++- */ +++ * object. 
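C_GenerateKey reuses the mechanism-plus-template shape of the object calls. An AES key-generation sketch (attribute set illustrative; real templates usually also set CKA_TOKEN, CKA_SENSITIVE and friends):

    CK_RV gen_aes256(CK_FUNCTION_LIST_PTR p11, CK_SESSION_HANDLE hSession,
                     CK_OBJECT_HANDLE_PTR phKey)
    {
        CK_MECHANISM mech = { CKM_AES_KEY_GEN, NULL_PTR, 0 };
        CK_ULONG bytes = 32;   /* AES-256 */
        CK_BBOOL yes = CK_TRUE;
        CK_ATTRIBUTE tmpl[] = {
            { CKA_VALUE_LEN, &bytes, sizeof(bytes) },
            { CKA_ENCRYPT,   &yes,   sizeof(yes) },
            { CKA_DECRYPT,   &yes,   sizeof(yes) },
        };
        return p11->C_GenerateKey(hSession, &mech, tmpl, 3, phKey);
    }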
*/ ++ CK_PKCS11_FUNCTION_INFO(C_DeriveKey) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* session's handle */ ++- CK_MECHANISM_PTR pMechanism, /* key deriv. mech. */ ++- CK_OBJECT_HANDLE hBaseKey, /* base key */ ++- CK_ATTRIBUTE_PTR pTemplate, /* new key template */ ++- CK_ULONG ulAttributeCount, /* template length */ ++- CK_OBJECT_HANDLE_PTR phKey /* gets new handle */ ++-); +++ CK_SESSION_HANDLE hSession, /* session's handle */ +++ CK_MECHANISM_PTR pMechanism, /* key deriv. mech. */ +++ CK_OBJECT_HANDLE hBaseKey, /* base key */ +++ CK_ATTRIBUTE_PTR pTemplate, /* new key template */ +++ CK_ULONG ulAttributeCount, /* template length */ +++ CK_OBJECT_HANDLE_PTR phKey /* gets new handle */ +++ ); ++ #endif ++ ++- ++- ++ /* Random number generation */ ++ ++ /* C_SeedRandom mixes additional seed material into the token's ++- * random number generator. ++- */ +++ * random number generator. */ ++ CK_PKCS11_FUNCTION_INFO(C_SeedRandom) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_BYTE_PTR pSeed, /* the seed material */ ++- CK_ULONG ulSeedLen /* length of seed material */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_BYTE_PTR pSeed, /* the seed material */ +++ CK_ULONG ulSeedLen /* length of seed material */ +++ ); ++ #endif ++ ++- ++ /* C_GenerateRandom generates random data. */ ++ CK_PKCS11_FUNCTION_INFO(C_GenerateRandom) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_BYTE_PTR RandomData, /* receives the random data */ ++- CK_ULONG ulRandomLen /* # of bytes to generate */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_BYTE_PTR RandomData, /* receives the random data */ +++ CK_ULONG ulRandomLen /* # of bytes to generate */ +++ ); ++ #endif ++ ++- ++- ++ /* Parallel function management */ ++ ++ /* C_GetFunctionStatus is a legacy function; it obtains an ++ * updated status of a function running in parallel with an ++- * application. ++- */ +++ * application. */ ++ CK_PKCS11_FUNCTION_INFO(C_GetFunctionStatus) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession /* the session's handle */ ++-); +++ CK_SESSION_HANDLE hSession /* the session's handle */ +++ ); ++ #endif ++ ++- ++ /* C_CancelFunction is a legacy function; it cancels a function ++- * running in parallel. ++- */ +++ * running in parallel. */ ++ CK_PKCS11_FUNCTION_INFO(C_CancelFunction) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession /* the session's handle */ ++-); +++ CK_SESSION_HANDLE hSession /* the session's handle */ +++ ); ++ #endif ++ +++/* Functions added in for PKCS #11 Version 2.01 or later */ ++ ++ /* C_WaitForSlotEvent waits for a slot event (token insertion, ++- * removal, etc.) to occur. ++- */ +++ * removal, etc.) to occur. */ ++ CK_PKCS11_FUNCTION_INFO(C_WaitForSlotEvent) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_FLAGS flags, /* blocking/nonblocking flag */ ++- CK_SLOT_ID_PTR pSlot, /* location that receives the slot ID */ ++- CK_VOID_PTR pRserved /* reserved. Should be NULL_PTR */ ++-); +++ CK_FLAGS flags, /* blocking/nonblocking flag */ +++ CK_SLOT_ID_PTR pSlot, /* location that receives the slot ID */ +++ CK_VOID_PTR pRserved /* reserved. 
Should be NULL_PTR */ +++ ); ++ #endif ++ ++-#ifndef CK_PKCS11_2_0_ONLY ++-/* C_GetInterfaceList returns all the interfaces supported by the module*/ +++#if defined(CK_PKCS11_3_0) && !defined(CK_PKCS11_2_0_ONLY) ++ CK_PKCS11_FUNCTION_INFO(C_GetInterfaceList) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_INTERFACE_PTR pInterfacesList, /* returned interfaces */ ++- CK_ULONG_PTR pulCount /* number of interfaces returned */ ++-); +++ CK_INTERFACE_PTR interfaces, +++ CK_ULONG_PTR pulCount); ++ #endif ++ ++-/* C_GetInterface returns a specific interface from the module. */ ++ CK_PKCS11_FUNCTION_INFO(C_GetInterface) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_UTF8CHAR_PTR pInterfaceName, /* name of the interface */ ++- CK_VERSION_PTR pVersion, /* version of the interface */ ++- CK_INTERFACE_PTR_PTR ppInterface, /* returned interface */ ++- CK_FLAGS flags /* flags controlling the semantics ++- * of the interface */ ++-); +++ CK_UTF8CHAR_PTR pInterfaceName, +++ CK_VERSION_PTR pVersion, +++ CK_INTERFACE_PTR_PTR ppInterface, +++ CK_FLAGS flags); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_LoginUser) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_USER_TYPE userType, /* the user type */ ++- CK_UTF8CHAR_PTR pPin, /* the user's PIN */ ++- CK_ULONG ulPinLen, /* the length of the PIN */ ++- CK_UTF8CHAR_PTR pUsername, /* the user's name */ ++- CK_ULONG ulUsernameLen /*the length of the user's name */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_USER_TYPE userType, +++ CK_CHAR_PTR pPin, +++ CK_ULONG ulPinLen, +++ CK_UTF8CHAR_PTR pUsername, +++ CK_ULONG ulUsernameLen); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_SessionCancel) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_FLAGS flags /* flags control which sessions are cancelled */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_FLAGS flags); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_MessageEncryptInit) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_MECHANISM_PTR pMechanism, /* the encryption mechanism */ ++- CK_OBJECT_HANDLE hKey /* handle of encryption key */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_MECHANISM_PTR pMechanism, +++ CK_OBJECT_HANDLE hKey); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_EncryptMessage) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_VOID_PTR pParameter, /* message specific parameter */ ++- CK_ULONG ulParameterLen, /* length of message specific parameter */ ++- CK_BYTE_PTR pAssociatedData, /* AEAD Associated data */ ++- CK_ULONG ulAssociatedDataLen, /* AEAD Associated data length */ ++- CK_BYTE_PTR pPlaintext, /* plain text */ ++- CK_ULONG ulPlaintextLen, /* plain text length */ ++- CK_BYTE_PTR pCiphertext, /* gets cipher text */ ++- CK_ULONG_PTR pulCiphertextLen /* gets cipher text length */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_VOID_PTR pParameter, +++ CK_ULONG ulParameterLen, +++ CK_BYTE_PTR pAssociatedData, +++ CK_ULONG ulAssociatedDataLen, +++ CK_BYTE_PTR pPlaintext, +++ CK_ULONG ulPlaintextLen, +++ CK_BYTE_PTR pCiphertext, +++ CK_ULONG_PTR pulCiphertextLen); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_EncryptMessageBegin) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_VOID_PTR pParameter, /* message specific parameter */ ++- CK_ULONG ulParameterLen, /* length of message specific parameter */ ++- CK_BYTE_PTR pAssociatedData, /* AEAD Associated data */ ++- CK_ULONG ulAssociatedDataLen /* 
AEAD Associated data length */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_VOID_PTR pParameter, +++ CK_ULONG ulParameterLen, +++ CK_BYTE_PTR pAssociatedData, +++ CK_ULONG ulAssociatedDataLen); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_EncryptMessageNext) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_VOID_PTR pParameter, /* message specific parameter */ ++- CK_ULONG ulParameterLen, /* length of message specific parameter */ ++- CK_BYTE_PTR pPlaintextPart, /* plain text */ ++- CK_ULONG ulPlaintextPartLen, /* plain text length */ ++- CK_BYTE_PTR pCiphertextPart, /* gets cipher text */ ++- CK_ULONG_PTR pulCiphertextPartLen, /* gets cipher text length */ ++- CK_FLAGS flags /* multi mode flag */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_VOID_PTR pParameter, +++ CK_ULONG ulParameterLen, +++ CK_BYTE_PTR pPlaintextPart, +++ CK_ULONG ulPlaintextPartLen, +++ CK_BYTE_PTR pCiphertextPart, +++ CK_ULONG_PTR pulCiphertextPartLen, +++ CK_FLAGS flags); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_MessageEncryptFinal) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession /* the session's handle */ ++-); +++ CK_SESSION_HANDLE hSession); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_MessageDecryptInit) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_MECHANISM_PTR pMechanism, /* the decryption mechanism */ ++- CK_OBJECT_HANDLE hKey /* handle of decryption key */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_MECHANISM_PTR pMechanism, +++ CK_OBJECT_HANDLE hKey); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_DecryptMessage) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_VOID_PTR pParameter, /* message specific parameter */ ++- CK_ULONG ulParameterLen, /* length of message specific parameter */ ++- CK_BYTE_PTR pAssociatedData, /* AEAD Associated data */ ++- CK_ULONG ulAssociatedDataLen, /* AEAD Associated data length */ ++- CK_BYTE_PTR pCiphertext, /* cipher text */ ++- CK_ULONG ulCiphertextLen, /* cipher text length */ ++- CK_BYTE_PTR pPlaintext, /* gets plain text */ ++- CK_ULONG_PTR pulPlaintextLen /* gets plain text length */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_VOID_PTR pParameter, +++ CK_ULONG ulParameterLen, +++ CK_BYTE_PTR pAssociatedData, +++ CK_ULONG ulAssociatedDataLen, +++ CK_BYTE_PTR pCiphertext, +++ CK_ULONG ulCiphertextLen, +++ CK_BYTE_PTR pPlaintext, +++ CK_ULONG_PTR pulPlaintextLen); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_DecryptMessageBegin) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_VOID_PTR pParameter, /* message specific parameter */ ++- CK_ULONG ulParameterLen, /* length of message specific parameter */ ++- CK_BYTE_PTR pAssociatedData, /* AEAD Associated data */ ++- CK_ULONG ulAssociatedDataLen /* AEAD Associated data length */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_VOID_PTR pParameter, +++ CK_ULONG ulParameterLen, +++ CK_BYTE_PTR pAssociatedData, +++ CK_ULONG ulAssociatedDataLen); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_DecryptMessageNext) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_VOID_PTR pParameter, /* message specific parameter */ ++- CK_ULONG ulParameterLen, /* length of message specific parameter */ ++- CK_BYTE_PTR pCiphertextPart, /* cipher text */ ++- CK_ULONG ulCiphertextPartLen, /* cipher text length */ ++- CK_BYTE_PTR pPlaintextPart, /* gets plain text */ ++- CK_ULONG_PTR pulPlaintextPartLen, /* gets 
plain text length */ ++- CK_FLAGS flags /* multi mode flag */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_VOID_PTR pParameter, +++ CK_ULONG ulParameterLen, +++ CK_BYTE_PTR pCiphertextPart, +++ CK_ULONG ulCiphertextPartLen, +++ CK_BYTE_PTR pPlaintextPart, +++ CK_ULONG_PTR pulPlaintextPartLen, +++ CK_FLAGS flags); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_MessageDecryptFinal) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession /* the session's handle */ ++-); +++ CK_SESSION_HANDLE hSession); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_MessageSignInit) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_MECHANISM_PTR pMechanism, /* the signing mechanism */ ++- CK_OBJECT_HANDLE hKey /* handle of signing key */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_MECHANISM_PTR pMechanism, +++ CK_OBJECT_HANDLE hKey); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_SignMessage) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_VOID_PTR pParameter, /* message specific parameter */ ++- CK_ULONG ulParameterLen, /* length of message specific parameter */ ++- CK_BYTE_PTR pData, /* data to sign */ ++- CK_ULONG ulDataLen, /* data to sign length */ ++- CK_BYTE_PTR pSignature, /* gets signature */ ++- CK_ULONG_PTR pulSignatureLen /* gets signature length */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_VOID_PTR pParameter, +++ CK_ULONG ulParameterLen, +++ CK_BYTE_PTR pData, +++ CK_ULONG ulDataLen, +++ +++ CK_BYTE_PTR pSignature, +++ CK_ULONG_PTR pulSignatureLen); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_SignMessageBegin) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_VOID_PTR pParameter, /* message specific parameter */ ++- CK_ULONG ulParameterLen /* length of message specific parameter */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_VOID_PTR pParameter, +++ CK_ULONG ulParameterLen); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_SignMessageNext) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_VOID_PTR pParameter, /* message specific parameter */ ++- CK_ULONG ulParameterLen, /* length of message specific parameter */ ++- CK_BYTE_PTR pData, /* data to sign */ ++- CK_ULONG ulDataLen, /* data to sign length */ ++- CK_BYTE_PTR pSignature, /* gets signature */ ++- CK_ULONG_PTR pulSignatureLen /* gets signature length */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_VOID_PTR pParameter, +++ CK_ULONG ulParameterLen, +++ CK_BYTE_PTR pData, +++ CK_ULONG ulDataLen, +++ CK_BYTE_PTR pSignature, +++ CK_ULONG_PTR pulSignatureLen); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_MessageSignFinal) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession /* the session's handle */ ++-); +++ CK_SESSION_HANDLE hSession); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_MessageVerifyInit) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_MECHANISM_PTR pMechanism, /* the signing mechanism */ ++- CK_OBJECT_HANDLE hKey /* handle of signing key */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_MECHANISM_PTR pMechanism, +++ CK_OBJECT_HANDLE hKey); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_VerifyMessage) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_VOID_PTR pParameter, /* message specific parameter */ ++- CK_ULONG ulParameterLen, /* length of message specific parameter */ ++- CK_BYTE_PTR pData, /* data to sign */ ++- CK_ULONG ulDataLen, /* data to sign length 
*/ ++- CK_BYTE_PTR pSignature, /* signature */ ++- CK_ULONG ulSignatureLen /* signature length */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_VOID_PTR pParameter, +++ CK_ULONG ulParameterLen, +++ CK_BYTE_PTR pData, +++ CK_ULONG ulDataLen, +++ CK_BYTE_PTR pSignature, +++ CK_ULONG ulSignatureLen); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_VerifyMessageBegin) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_VOID_PTR pParameter, /* message specific parameter */ ++- CK_ULONG ulParameterLen /* length of message specific parameter */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_VOID_PTR pParameter, +++ CK_ULONG ulParameterLen); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_VerifyMessageNext) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_VOID_PTR pParameter, /* message specific parameter */ ++- CK_ULONG ulParameterLen, /* length of message specific parameter */ ++- CK_BYTE_PTR pData, /* data to sign */ ++- CK_ULONG ulDataLen, /* data to sign length */ ++- CK_BYTE_PTR pSignature, /* signature */ ++- CK_ULONG ulSignatureLen /* signature length */ ++-); +++ CK_SESSION_HANDLE hSession, +++ CK_VOID_PTR pParameter, +++ CK_ULONG ulParameterLen, +++ CK_BYTE_PTR pData, +++ CK_ULONG ulDataLen, +++ CK_BYTE_PTR pSignature, +++ CK_ULONG ulSignatureLen); ++ #endif ++ ++ CK_PKCS11_FUNCTION_INFO(C_MessageVerifyFinal) ++ #ifdef CK_NEED_ARG_LIST ++ ( ++- CK_SESSION_HANDLE hSession /* the session's handle */ ++-); +++ CK_SESSION_HANDLE hSession); ++ #endif ++ ++-#endif /* CK_PKCS11_2_0_ONLY */ ++- +++#endif ++--- /dev/null +++++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11n.h ++@@ -0,0 +1,672 @@ +++/* This Source Code Form is subject to the terms of the Mozilla Public +++ * License, v. 2.0. If a copy of the MPL was not distributed with this +++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +++ +++#ifndef _PKCS11N_H_ +++#define _PKCS11N_H_ +++ +++/* +++ * pkcs11n.h +++ * +++ * This file contains the NSS-specific type definitions for Cryptoki +++ * (PKCS#11). +++ */ +++ +++/* +++ * NSSCK_VENDOR_NSS +++ * +++ * Cryptoki reserves the high half of all the number spaces for +++ * vendor-defined use. I'd like to keep all of our NSS- +++ * specific values together, but not in the oh-so-obvious +++ * 0x80000001, 0x80000002, etc. area. So I've picked an offset, +++ * and constructed values for the beginnings of our spaces. +++ * +++ * Note that some "historical" Netscape values don't fall within +++ * this range. 
+++ */ +++#define NSSCK_VENDOR_NSS 0x4E534350 /* NSCP */ +++ +++/* +++ * NSS-defined object classes +++ * +++ */ +++#define CKO_NSS (CKO_VENDOR_DEFINED | NSSCK_VENDOR_NSS) +++ +++#define CKO_NSS_CRL (CKO_NSS + 1) +++#define CKO_NSS_SMIME (CKO_NSS + 2) +++#define CKO_NSS_TRUST (CKO_NSS + 3) +++#define CKO_NSS_BUILTIN_ROOT_LIST (CKO_NSS + 4) +++#define CKO_NSS_NEWSLOT (CKO_NSS + 5) +++#define CKO_NSS_DELSLOT (CKO_NSS + 6) +++ +++/* +++ * NSS-defined key types +++ * +++ */ +++#define CKK_NSS (CKK_VENDOR_DEFINED | NSSCK_VENDOR_NSS) +++ +++#define CKK_NSS_PKCS8 (CKK_NSS + 1) +++ +++#define CKK_NSS_JPAKE_ROUND1 (CKK_NSS + 2) +++#define CKK_NSS_JPAKE_ROUND2 (CKK_NSS + 3) +++ +++#define CKK_NSS_CHACHA20 (CKK_NSS + 4) +++ +++/* +++ * NSS-defined certificate types +++ * +++ */ +++#define CKC_NSS (CKC_VENDOR_DEFINED | NSSCK_VENDOR_NSS) +++ +++/* FAKE PKCS #11 defines */ +++#define CKA_DIGEST 0x81000000L +++#define CKA_NSS_MESSAGE 0x82000000L +++#define CKA_NSS_MESSAGE_MASK 0xff000000L +++#define CKA_FLAGS_ONLY 0 /* CKA_CLASS */ +++ +++/* +++ * NSS-defined object attributes +++ * +++ */ +++#define CKA_NSS (CKA_VENDOR_DEFINED | NSSCK_VENDOR_NSS) +++ +++#define CKA_NSS_URL (CKA_NSS + 1) +++#define CKA_NSS_EMAIL (CKA_NSS + 2) +++#define CKA_NSS_SMIME_INFO (CKA_NSS + 3) +++#define CKA_NSS_SMIME_TIMESTAMP (CKA_NSS + 4) +++#define CKA_NSS_PKCS8_SALT (CKA_NSS + 5) +++#define CKA_NSS_PASSWORD_CHECK (CKA_NSS + 6) +++#define CKA_NSS_EXPIRES (CKA_NSS + 7) +++#define CKA_NSS_KRL (CKA_NSS + 8) +++ +++#define CKA_NSS_PQG_COUNTER (CKA_NSS + 20) +++#define CKA_NSS_PQG_SEED (CKA_NSS + 21) +++#define CKA_NSS_PQG_H (CKA_NSS + 22) +++#define CKA_NSS_PQG_SEED_BITS (CKA_NSS + 23) +++#define CKA_NSS_MODULE_SPEC (CKA_NSS + 24) +++#define CKA_NSS_OVERRIDE_EXTENSIONS (CKA_NSS + 25) +++ +++#define CKA_NSS_JPAKE_SIGNERID (CKA_NSS + 26) +++#define CKA_NSS_JPAKE_PEERID (CKA_NSS + 27) +++#define CKA_NSS_JPAKE_GX1 (CKA_NSS + 28) +++#define CKA_NSS_JPAKE_GX2 (CKA_NSS + 29) +++#define CKA_NSS_JPAKE_GX3 (CKA_NSS + 30) +++#define CKA_NSS_JPAKE_GX4 (CKA_NSS + 31) +++#define CKA_NSS_JPAKE_X2 (CKA_NSS + 32) +++#define CKA_NSS_JPAKE_X2S (CKA_NSS + 33) +++ +++#define CKA_NSS_MOZILLA_CA_POLICY (CKA_NSS + 34) +++#define CKA_NSS_SERVER_DISTRUST_AFTER (CKA_NSS + 35) +++#define CKA_NSS_EMAIL_DISTRUST_AFTER (CKA_NSS + 36) +++ +++/* +++ * Trust attributes: +++ * +++ * If trust goes standard, these probably will too. So I'll +++ * put them all in one place. 
+++ */ +++ +++#define CKA_TRUST (CKA_NSS + 0x2000) +++ +++/* "Usage" key information */ +++#define CKA_TRUST_DIGITAL_SIGNATURE (CKA_TRUST + 1) +++#define CKA_TRUST_NON_REPUDIATION (CKA_TRUST + 2) +++#define CKA_TRUST_KEY_ENCIPHERMENT (CKA_TRUST + 3) +++#define CKA_TRUST_DATA_ENCIPHERMENT (CKA_TRUST + 4) +++#define CKA_TRUST_KEY_AGREEMENT (CKA_TRUST + 5) +++#define CKA_TRUST_KEY_CERT_SIGN (CKA_TRUST + 6) +++#define CKA_TRUST_CRL_SIGN (CKA_TRUST + 7) +++ +++/* "Purpose" trust information */ +++#define CKA_TRUST_SERVER_AUTH (CKA_TRUST + 8) +++#define CKA_TRUST_CLIENT_AUTH (CKA_TRUST + 9) +++#define CKA_TRUST_CODE_SIGNING (CKA_TRUST + 10) +++#define CKA_TRUST_EMAIL_PROTECTION (CKA_TRUST + 11) +++#define CKA_TRUST_IPSEC_END_SYSTEM (CKA_TRUST + 12) +++#define CKA_TRUST_IPSEC_TUNNEL (CKA_TRUST + 13) +++#define CKA_TRUST_IPSEC_USER (CKA_TRUST + 14) +++#define CKA_TRUST_TIME_STAMPING (CKA_TRUST + 15) +++#define CKA_TRUST_STEP_UP_APPROVED (CKA_TRUST + 16) +++ +++#define CKA_CERT_SHA1_HASH (CKA_TRUST + 100) +++#define CKA_CERT_MD5_HASH (CKA_TRUST + 101) +++ +++/* NSS trust stuff */ +++ +++/* HISTORICAL: define used to pass in the database key for DSA private keys */ +++#define CKA_NSS_DB 0xD5A0DB00L +++#define CKA_NSS_TRUST 0x80000001L +++ +++/* FAKE PKCS #11 defines */ +++#define CKM_FAKE_RANDOM 0x80000efeUL +++#define CKM_INVALID_MECHANISM 0xffffffffUL +++#define CKT_INVALID_TYPE 0xffffffffUL +++ +++/* +++ * NSS-defined crypto mechanisms +++ * +++ */ +++#define CKM_NSS (CKM_VENDOR_DEFINED | NSSCK_VENDOR_NSS) +++ +++#define CKM_NSS_AES_KEY_WRAP (CKM_NSS + 1) +++#define CKM_NSS_AES_KEY_WRAP_PAD (CKM_NSS + 2) +++ +++/* HKDF key derivation mechanisms. See CK_NSS_HKDFParams for documentation. */ +++#define CKM_NSS_HKDF_SHA1 (CKM_NSS + 3) +++#define CKM_NSS_HKDF_SHA256 (CKM_NSS + 4) +++#define CKM_NSS_HKDF_SHA384 (CKM_NSS + 5) +++#define CKM_NSS_HKDF_SHA512 (CKM_NSS + 6) +++ +++/* J-PAKE round 1 key generation mechanisms. +++ * +++ * Required template attributes: CKA_PRIME, CKA_SUBPRIME, CKA_BASE, +++ * CKA_NSS_JPAKE_SIGNERID +++ * Output key type: CKK_NSS_JPAKE_ROUND1 +++ * Output key class: CKO_PRIVATE_KEY +++ * Parameter type: CK_NSS_JPAKERound1Params +++ * +++ */ +++#define CKM_NSS_JPAKE_ROUND1_SHA1 (CKM_NSS + 7) +++#define CKM_NSS_JPAKE_ROUND1_SHA256 (CKM_NSS + 8) +++#define CKM_NSS_JPAKE_ROUND1_SHA384 (CKM_NSS + 9) +++#define CKM_NSS_JPAKE_ROUND1_SHA512 (CKM_NSS + 10) +++ +++/* J-PAKE round 2 key derivation mechanisms. +++ * +++ * Required template attributes: CKA_NSS_JPAKE_PEERID +++ * Input key type: CKK_NSS_JPAKE_ROUND1 +++ * Output key type: CKK_NSS_JPAKE_ROUND2 +++ * Output key class: CKO_PRIVATE_KEY +++ * Parameter type: CK_NSS_JPAKERound2Params +++ */ +++#define CKM_NSS_JPAKE_ROUND2_SHA1 (CKM_NSS + 11) +++#define CKM_NSS_JPAKE_ROUND2_SHA256 (CKM_NSS + 12) +++#define CKM_NSS_JPAKE_ROUND2_SHA384 (CKM_NSS + 13) +++#define CKM_NSS_JPAKE_ROUND2_SHA512 (CKM_NSS + 14) +++ +++/* J-PAKE final key material derivation mechanisms +++ * +++ * Input key type: CKK_NSS_JPAKE_ROUND2 +++ * Output key type: CKK_GENERIC_SECRET +++ * Output key class: CKO_SECRET_KEY +++ * Parameter type: CK_NSS_JPAKEFinalParams +++ * +++ * You must apply a KDF (e.g. CKM_NSS_HKDF_*) to resultant keying material +++ * to get a key with uniformly distributed bits. 
+++ */ +++#define CKM_NSS_JPAKE_FINAL_SHA1 (CKM_NSS + 15) +++#define CKM_NSS_JPAKE_FINAL_SHA256 (CKM_NSS + 16) +++#define CKM_NSS_JPAKE_FINAL_SHA384 (CKM_NSS + 17) +++#define CKM_NSS_JPAKE_FINAL_SHA512 (CKM_NSS + 18) +++ +++/* Constant-time MAC mechanisms: +++ * +++ * These operations verify a padded, MAC-then-encrypt block of data in +++ * constant-time. Because of the order of operations, the padding bytes are not +++ * protected by the MAC. However, disclosing the value of the padding bytes +++ * gives an attacker the ability to decrypt ciphertexts. Such disclosure can be +++ * as subtle as taking slightly less time to perform the MAC when the padding +++ * is one byte longer. See https://www.isg.rhul.ac.uk/tls/ +++ * +++ * CKM_NSS_HMAC_CONSTANT_TIME: performs an HMAC authentication. +++ * CKM_NSS_SSL3_MAC_CONSTANT_TIME: performs an authentication with SSLv3 MAC. +++ * +++ * Parameter type: CK_NSS_MAC_CONSTANT_TIME_PARAMS +++ */ +++#define CKM_NSS_HMAC_CONSTANT_TIME (CKM_NSS + 19) +++#define CKM_NSS_SSL3_MAC_CONSTANT_TIME (CKM_NSS + 20) +++ +++/* TLS 1.2 mechanisms */ +++#define CKM_NSS_TLS_PRF_GENERAL_SHA256 (CKM_NSS + 21) +++#define CKM_NSS_TLS_MASTER_KEY_DERIVE_SHA256 (CKM_NSS + 22) +++#define CKM_NSS_TLS_KEY_AND_MAC_DERIVE_SHA256 (CKM_NSS + 23) +++#define CKM_NSS_TLS_MASTER_KEY_DERIVE_DH_SHA256 (CKM_NSS + 24) +++ +++/* TLS extended master secret derivation */ +++#define CKM_NSS_TLS_EXTENDED_MASTER_KEY_DERIVE (CKM_NSS + 25) +++#define CKM_NSS_TLS_EXTENDED_MASTER_KEY_DERIVE_DH (CKM_NSS + 26) +++ +++#define CKM_NSS_CHACHA20_KEY_GEN (CKM_NSS + 27) +++#define CKM_NSS_CHACHA20_POLY1305 (CKM_NSS + 28) +++ +++/* Additional PKCS #12 PBE algorithms defined in v1.1 */ +++#define CKM_NSS_PKCS12_PBE_SHA224_HMAC_KEY_GEN (CKM_NSS + 29) +++#define CKM_NSS_PKCS12_PBE_SHA256_HMAC_KEY_GEN (CKM_NSS + 30) +++#define CKM_NSS_PKCS12_PBE_SHA384_HMAC_KEY_GEN (CKM_NSS + 31) +++#define CKM_NSS_PKCS12_PBE_SHA512_HMAC_KEY_GEN (CKM_NSS + 32) +++ +++#define CKM_NSS_CHACHA20_CTR (CKM_NSS + 33) +++ +++/* IKE mechanisms (to be proposed to PKCS #11) */ +++#define CKM_NSS_IKE_PRF_PLUS_DERIVE (CKM_NSS + 34) +++#define CKM_NSS_IKE_PRF_DERIVE (CKM_NSS + 35) +++#define CKM_NSS_IKE1_PRF_DERIVE (CKM_NSS + 36) +++#define CKM_NSS_IKE1_APP_B_PRF_DERIVE (CKM_NSS + 37) +++ +++#define CKM_NSS_PUB_FROM_PRIV (CKM_NSS + 40) +++ +++/* SP800-108 NSS mechanisms with support for data object derivation */ +++#define CKM_NSS_SP800_108_COUNTER_KDF_DERIVE_DATA (CKM_NSS + 42) +++#define CKM_NSS_SP800_108_FEEDBACK_KDF_DERIVE_DATA (CKM_NSS + 43) +++#define CKM_NSS_SP800_108_DOUBLE_PIPELINE_KDF_DERIVE_DATA (CKM_NSS + 44) +++ +++/* +++ * HISTORICAL: +++ * Do not attempt to use these. They are only used by NSS's internal +++ * PKCS #11 interface. Most of these are placeholders for other mechanisms +++ * and will change in the future.
*/ +++#define CKM_NSS_PBE_SHA1_DES_CBC 0x80000002UL +++#define CKM_NSS_PBE_SHA1_TRIPLE_DES_CBC 0x80000003UL +++#define CKM_NSS_PBE_SHA1_40_BIT_RC2_CBC 0x80000004UL +++#define CKM_NSS_PBE_SHA1_128_BIT_RC2_CBC 0x80000005UL +++#define CKM_NSS_PBE_SHA1_40_BIT_RC4 0x80000006UL +++#define CKM_NSS_PBE_SHA1_128_BIT_RC4 0x80000007UL +++#define CKM_NSS_PBE_SHA1_FAULTY_3DES_CBC 0x80000008UL +++#define CKM_NSS_PBE_SHA1_HMAC_KEY_GEN 0x80000009UL +++#define CKM_NSS_PBE_MD5_HMAC_KEY_GEN 0x8000000aUL +++#define CKM_NSS_PBE_MD2_HMAC_KEY_GEN 0x8000000bUL +++ +++#define CKM_TLS_PRF_GENERAL 0x80000373UL +++ +++typedef struct CK_NSS_JPAKEPublicValue { +++ CK_BYTE *pGX; +++ CK_ULONG ulGXLen; +++ CK_BYTE *pGV; +++ CK_ULONG ulGVLen; +++ CK_BYTE *pR; +++ CK_ULONG ulRLen; +++} CK_NSS_JPAKEPublicValue; +++ +++typedef struct CK_NSS_JPAKERound1Params { +++ CK_NSS_JPAKEPublicValue gx1; /* out */ +++ CK_NSS_JPAKEPublicValue gx2; /* out */ +++} CK_NSS_JPAKERound1Params; +++ +++typedef struct CK_NSS_JPAKERound2Params { +++ CK_BYTE *pSharedKey; /* in */ +++ CK_ULONG ulSharedKeyLen; /* in */ +++ CK_NSS_JPAKEPublicValue gx3; /* in */ +++ CK_NSS_JPAKEPublicValue gx4; /* in */ +++ CK_NSS_JPAKEPublicValue A; /* out */ +++} CK_NSS_JPAKERound2Params; +++ +++typedef struct CK_NSS_JPAKEFinalParams { +++ CK_NSS_JPAKEPublicValue B; /* in */ +++} CK_NSS_JPAKEFinalParams; +++ +++/* macAlg: the MAC algorithm to use. This determines the hash function used in +++ * the HMAC/SSLv3 MAC calculations. +++ * ulBodyTotalLen: the total length of the data, including padding bytes and +++ * padding length. +++ * pHeader: points to a block of data that contains additional data to +++ * authenticate. For TLS this includes the sequence number etc. For SSLv3, +++ * this also includes the initial padding bytes. +++ * +++ * NOTE: the softoken's implementation of CKM_NSS_HMAC_CONSTANT_TIME and +++ * CKM_NSS_SSL3_MAC_CONSTANT_TIME requires that the sum of ulBodyTotalLen +++ * and ulHeaderLen be much smaller than 2^32 / 8 bytes because it uses an +++ * unsigned int variable to represent the length in bits. This should not +++ * be a problem because the SSL/TLS protocol limits the size of an SSL +++ * record to something considerably less than 2^32 bytes. +++ */ +++typedef struct CK_NSS_MAC_CONSTANT_TIME_PARAMS { +++ CK_MECHANISM_TYPE macAlg; /* in */ +++ CK_ULONG ulBodyTotalLen; /* in */ +++ CK_BYTE *pHeader; /* in */ +++ CK_ULONG ulHeaderLen; /* in */ +++} CK_NSS_MAC_CONSTANT_TIME_PARAMS; +++ +++typedef struct CK_NSS_AEAD_PARAMS { +++ CK_BYTE_PTR pNonce; +++ CK_ULONG ulNonceLen; +++ CK_BYTE_PTR pAAD; +++ CK_ULONG ulAADLen; +++ CK_ULONG ulTagLen; +++} CK_NSS_AEAD_PARAMS; +++ +++/* +++ * NSS-defined return values +++ * +++ */ +++#define CKR_NSS (CKM_VENDOR_DEFINED | NSSCK_VENDOR_NSS) +++ +++#define CKR_NSS_CERTDB_FAILED (CKR_NSS + 1) +++#define CKR_NSS_KEYDB_FAILED (CKR_NSS + 2) +++ +++/* Mandatory parameter for the CKM_NSS_HKDF_* key derivation mechanisms. +++ See RFC 5869. +++ +++ bExtract: If set, HKDF-Extract will be applied to the input key. If +++ the optional salt is given, it is used; otherwise, the salt is +++ set to a sequence of zeros equal in length to the HMAC output. +++ If bExpand is not set, then the key template given to +++ C_DeriveKey must indicate an output key size less than or equal +++ to the output size of the HMAC. +++ +++ bExpand: If set, HKDF-Expand will be applied to the input key (if +++ bExtract is not set) or to the result of HKDF-Extract (if +++ bExtract is set).
Any info given in the optional pInfo field will +++ be included in the calculation. +++ +++ The size of the output key must be specified in the template passed to +++ C_DeriveKey. +++*/ +++typedef struct CK_NSS_HKDFParams { +++ CK_BBOOL bExtract; +++ CK_BYTE_PTR pSalt; +++ CK_ULONG ulSaltLen; +++ CK_BBOOL bExpand; +++ CK_BYTE_PTR pInfo; +++ CK_ULONG ulInfoLen; +++} CK_NSS_HKDFParams; +++ +++/* +++ * CK_NSS_IKE_PRF_PLUS_DERIVE_PARAMS is a structure that provides the parameters to +++ * the CKM_NSS_IKE_PRF_PLUS_DERIVE mechanism. +++ * The fields of the structure have the following meanings: +++ * prfMechanism underlying MAC mechanism used to generate the prf. +++ * bHasSeedKey hSeedKey is present. +++ * hSeedKey optional seed from key +++ * pSeedData optional seed from data. +++ * ulSeedDataLen length of optional seed data. +++ * If no seed data is present this value is NULL. +++ */ +++typedef struct CK_NSS_IKE_PRF_PLUS_DERIVE_PARAMS { +++ CK_MECHANISM_TYPE prfMechanism; +++ CK_BBOOL bHasSeedKey; +++ CK_OBJECT_HANDLE hSeedKey; +++ CK_BYTE_PTR pSeedData; +++ CK_ULONG ulSeedDataLen; +++} CK_NSS_IKE_PRF_PLUS_DERIVE_PARAMS; +++ +++/* CK_NSS_IKE_PRF_DERIVE_PARAMS is a structure that provides the parameters to +++ * the CKM_NSS_IKE_PRF_DERIVE mechanism. +++ * +++ * The fields of the structure have the following meanings: +++ * prfMechanism underlying MAC mechanism used to generate the prf. +++ * bRekey hNewKey is present. +++ * pNi Ni value +++ * ulNiLen length of Ni +++ * pNr Nr value +++ * ulNrLen length of Nr +++ * hNewKey New key value to drive the rekey. +++ */ +++typedef struct CK_NSS_IKE_PRF_DERIVE_PARAMS { +++ CK_MECHANISM_TYPE prfMechanism; +++ CK_BBOOL bDataAsKey; +++ CK_BBOOL bRekey; +++ CK_BYTE_PTR pNi; +++ CK_ULONG ulNiLen; +++ CK_BYTE_PTR pNr; +++ CK_ULONG ulNrLen; +++ CK_OBJECT_HANDLE hNewKey; +++} CK_NSS_IKE_PRF_DERIVE_PARAMS; +++ +++/* CK_NSS_IKE1_PRF_DERIVE_PARAMS is a structure that provides the parameters +++ * to the CKM_NSS_IKE1_PRF_DERIVE mechanism. +++ * +++ * The fields of the structure have the following meanings: +++ * prfMechanism underlying MAC mechanism used to generate the prf. +++ * bRekey hNewKey is present. +++ * pCKYi CKYi value +++ * ulCKYiLen length of CKYi +++ * pCKYr CKYr value +++ * ulCKYrLen length of CKYr +++ * hNewKey New key value to drive the rekey. +++ */ +++typedef struct CK_NSS_IKE1_PRF_DERIVE_PARAMS { +++ CK_MECHANISM_TYPE prfMechanism; +++ CK_BBOOL bHasPrevKey; +++ CK_OBJECT_HANDLE hKeygxy; +++ CK_OBJECT_HANDLE hPrevKey; +++ CK_BYTE_PTR pCKYi; +++ CK_ULONG ulCKYiLen; +++ CK_BYTE_PTR pCKYr; +++ CK_ULONG ulCKYrLen; +++ CK_BYTE keyNumber; +++} CK_NSS_IKE1_PRF_DERIVE_PARAMS; +++ +++/* CK_NSS_IKE1_APP_B_PRF_DERIVE_PARAMS is a structure that provides the +++ * parameters to the CKM_NSS_IKE1_APP_B_PRF_DERIVE mechanism. +++ * +++ * The fields of the structure have the following meanings: +++ * prfMechanism underlying MAC mechanism used to generate the prf. +++ * bHasKeygxy hKeygxy exists +++ * hKeygxy optional key to hash in the prf +++ * pExtraData optional extra data to hash in the prf +++ * ulExtraDataLen length of the optional extra data. +++ * +++ * CKM_NSS_IKE1_APP_B_PRF_DERIVE can take either CK_NSS_IKE1_APP_B_PRF_DERIVE_PARAMS +++ * or a single CK_MECHANISM_TYPE. In the latter case bHasKeygxy is assumed to +++ * be false and ulExtraDataLen is assumed to be '0'.
+++ */ +++typedef struct CK_NSS_IKE1_APP_B_PRF_DERIVE_PARAMS { +++ CK_MECHANISM_TYPE prfMechanism; +++ CK_BBOOL bHasKeygxy; +++ CK_OBJECT_HANDLE hKeygxy; +++ CK_BYTE_PTR pExtraData; +++ CK_ULONG ulExtraDataLen; +++} CK_NSS_IKE1_APP_B_PRF_DERIVE_PARAMS; +++ +++/* +++ * Parameter for the TLS extended master secret key derivation mechanisms: +++ * +++ * * CKM_NSS_TLS_EXTENDED_MASTER_KEY_DERIVE +++ * * CKM_NSS_TLS_EXTENDED_MASTER_KEY_DERIVE_DH +++ * +++ * For the TLS 1.2 PRF, the prfHashMechanism parameter determines the hash +++ * function used. For earlier versions of the PRF, set the prfHashMechanism +++ * value to CKM_TLS_PRF. +++ * +++ * The session hash input is expected to be the output of the same hash +++ * function as the PRF uses (as required by draft-ietf-tls-session-hash). So +++ * the ulSessionHashLen member must be equal to the output length of the hash +++ * function specified by the prfHashMechanism member (or, for pre-TLS 1.2 PRF, +++ * the length of concatenated MD5 and SHA-1 digests). +++ * +++ */ +++typedef struct CK_NSS_TLS_EXTENDED_MASTER_KEY_DERIVE_PARAMS { +++ CK_MECHANISM_TYPE prfHashMechanism; +++ CK_BYTE_PTR pSessionHash; +++ CK_ULONG ulSessionHashLen; +++ CK_VERSION_PTR pVersion; +++} CK_NSS_TLS_EXTENDED_MASTER_KEY_DERIVE_PARAMS; +++ +++/* +++ * Trust info +++ * +++ * This isn't part of the Cryptoki standard (yet), so I'm putting +++ * all the definitions here. Some of this would move to nssckt.h +++ * if trust info were made part of the standard. In view of this +++ * possibility, I'm putting my (NSS) values in the NSS +++ * vendor space, like everything else. +++ */ +++ +++typedef CK_ULONG CK_TRUST; +++ +++/* The following trust types are defined: */ +++#define CKT_VENDOR_DEFINED 0x80000000 +++ +++#define CKT_NSS (CKT_VENDOR_DEFINED | NSSCK_VENDOR_NSS) +++ +++/* If trust goes standard, these'll probably drop out of vendor space. */ +++#define CKT_NSS_TRUSTED (CKT_NSS + 1) +++#define CKT_NSS_TRUSTED_DELEGATOR (CKT_NSS + 2) +++#define CKT_NSS_MUST_VERIFY_TRUST (CKT_NSS + 3) +++#define CKT_NSS_NOT_TRUSTED (CKT_NSS + 10) +++#define CKT_NSS_TRUST_UNKNOWN (CKT_NSS + 5) /* default */ +++ +++/* +++ * These may well remain NSS-specific; I'm only using them +++ * to cache resolution data. +++ */ +++#define CKT_NSS_VALID_DELEGATOR (CKT_NSS + 11) +++ +++/* +++ * old definitions. They still exist, but the plain meaning of the +++ * labels has never been accurate to what was really implemented. +++ * The new labels correctly reflect what the values effectively mean. +++ */ +++#if defined(__GNUC__) && (__GNUC__ > 3) +++/* make GCC warn when we use these #defines */ +++/* +++ * This is really painful because GCC doesn't allow us to mark random +++ * #defines as deprecated. We can only mark the following: +++ * functions, variables, and types. +++ * const variables will create extra storage for everyone including this +++ * header file, so it's undesirable. +++ * functions could be inlined to prevent storage creation, but will fail +++ * when constant values are expected (like switch statements). +++ * enum types do not seem to pay attention to the deprecated attribute. +++ * +++ * That leaves typedefs. We declare new types that we then deprecate, then +++ * cast the resulting value to the deprecated type in the #define, thus +++ * producing the warning when the #define is used. +++ */ +++#if (__GNUC__ == 4) && (__GNUC_MINOR__ < 5) +++/* The mac doesn't like the friendlier deprecate messages.
I'm assuming this +++ * is a gcc version issue rather than mac or ppc specific */ +++typedef CK_TRUST __CKT_NSS_UNTRUSTED __attribute__((deprecated)); +++typedef CK_TRUST __CKT_NSS_VALID __attribute__((deprecated)); +++typedef CK_TRUST __CKT_NSS_MUST_VERIFY __attribute__((deprecated)); +++#else +++/* when possible, get a full deprecation warning. This works on gcc 4.5 +++ * it may work on earlier versions of gcc */ +++typedef CK_TRUST __CKT_NSS_UNTRUSTED __attribute__((deprecated("CKT_NSS_UNTRUSTED really means CKT_NSS_MUST_VERIFY_TRUST"))); +++typedef CK_TRUST __CKT_NSS_VALID __attribute__((deprecated("CKT_NSS_VALID really means CKT_NSS_NOT_TRUSTED"))); +++typedef CK_TRUST __CKT_NSS_MUST_VERIFY __attribute__((deprecated("CKT_NSS_MUST_VERIFY really functions as CKT_NSS_TRUST_UNKNOWN"))); +++#endif +++#define CKT_NSS_UNTRUSTED ((__CKT_NSS_UNTRUSTED)CKT_NSS_MUST_VERIFY_TRUST) +++#define CKT_NSS_VALID ((__CKT_NSS_VALID)CKT_NSS_NOT_TRUSTED) +++/* keep the old value for compatibility reasons*/ +++#define CKT_NSS_MUST_VERIFY ((__CKT_NSS_MUST_VERIFY)(CKT_NSS + 4)) +++#else +++#ifdef _WIN32 +++/* This magic gets the windows compiler to give us a deprecation +++ * warning */ +++#pragma deprecated(CKT_NSS_UNTRUSTED, CKT_NSS_MUST_VERIFY, CKT_NSS_VALID) +++#endif +++/* CKT_NSS_UNTRUSTED really means CKT_NSS_MUST_VERIFY_TRUST */ +++#define CKT_NSS_UNTRUSTED CKT_NSS_MUST_VERIFY_TRUST +++/* CKT_NSS_VALID really means CKT_NSS_NOT_TRUSTED */ +++#define CKT_NSS_VALID CKT_NSS_NOT_TRUSTED +++/* CKT_NSS_MUST_VERIFY was always treated as CKT_NSS_TRUST_UNKNOWN */ +++#define CKT_NSS_MUST_VERIFY (CKT_NSS + 4) /*really means trust unknown*/ +++#endif +++ +++/* +++ * These are not really PKCS #11 values specifically. They are the 'loadable' +++ * module spec NSS uses. They are available for others to use as well, but not +++ * part of the formal PKCS #11 spec. +++ * +++ * The function 'FIND' returns an array of PKCS #11 initialization strings +++ * The function 'ADD' takes a PKCS #11 initialization string and stores it. +++ * The function 'DEL' takes a 'name= library=' value and deletes the associated +++ * string. +++ * The function 'RELEASE' frees the array returned by 'FIND' +++ */ +++#define SECMOD_MODULE_DB_FUNCTION_FIND 0 +++#define SECMOD_MODULE_DB_FUNCTION_ADD 1 +++#define SECMOD_MODULE_DB_FUNCTION_DEL 2 +++#define SECMOD_MODULE_DB_FUNCTION_RELEASE 3 +++typedef char **(PR_CALLBACK *SECMODModuleDBFunc)(unsigned long function, +++ char *parameters, void *moduleSpec); +++ +++/* softoken slot ID's */ +++#define SFTK_MIN_USER_SLOT_ID 4 +++#define SFTK_MAX_USER_SLOT_ID 100 +++#define SFTK_MIN_FIPS_USER_SLOT_ID 101 +++#define SFTK_MAX_FIPS_USER_SLOT_ID 127 +++ +++/* Module Interface. This is the old NSS private module interface, now exported +++ * as a PKCS #11 v3 interface. It's interface name is +++ * "Vendor NSS Module Interface" */ +++typedef char **(*CK_NSS_ModuleDBFunc)(unsigned long function, +++ char *parameters, void *args); +++typedef struct CK_NSS_MODULE_FUNCTIONS { +++ CK_VERSION version; +++ CK_NSS_ModuleDBFunc NSC_ModuleDBFunc; +++} CK_NSS_MODULE_FUNCTIONS; +++ +++/* There was an inconsistency between the spec and the header file in defining +++ * the CK_GCM_PARAMS structure. The authoritative reference is the header file, +++ * but NSS used the spec when adding it to its own header. In V3 we've +++ * corrected it, but we need to handle the old case for devices that followed +++ * us in using the incorrect specification. 
*/ +++typedef struct CK_NSS_GCM_PARAMS { +++ CK_BYTE_PTR pIv; +++ CK_ULONG ulIvLen; +++ CK_BYTE_PTR pAAD; +++ CK_ULONG ulAADLen; +++ CK_ULONG ulTagBits; +++} CK_NSS_GCM_PARAMS; +++ +++typedef CK_NSS_GCM_PARAMS CK_PTR CK_NSS_GCM_PARAMS_PTR; +++ +++/* deprecated #defines. Drop in future NSS releases */ +++#ifdef NSS_PKCS11_2_0_COMPAT +++ +++/* defines that were changed between NSS's PKCS #11 and the Oasis headers */ +++#define CKF_EC_FP CKF_EC_F_P +++#define CKO_KG_PARAMETERS CKO_DOMAIN_PARAMETERS +++#define CK_INVALID_SESSION CK_INVALID_HANDLE +++#define CKR_KEY_PARAMS_INVALID 0x0000006B +++ +++/* use the old wrong CK_GCM_PARAMS if NSS_PKCS11_2_0_COMPAT is defined */ +++typedef struct CK_NSS_GCM_PARAMS CK_GCM_PARAMS; +++typedef CK_NSS_GCM_PARAMS CK_PTR CK_GCM_PARAMS_PTR; +++ +++/* don't leave old programs in the lurch just yet, give them the old NETSCAPE +++ * synonym if NSS_PKCS11_2_0_COMPAT is defined */ +++#define CKO_NETSCAPE_CRL CKO_NSS_CRL +++#define CKO_NETSCAPE_SMIME CKO_NSS_SMIME +++#define CKO_NETSCAPE_TRUST CKO_NSS_TRUST +++#define CKO_NETSCAPE_BUILTIN_ROOT_LIST CKO_NSS_BUILTIN_ROOT_LIST +++#define CKO_NETSCAPE_NEWSLOT CKO_NSS_NEWSLOT +++#define CKO_NETSCAPE_DELSLOT CKO_NSS_DELSLOT +++#define CKK_NETSCAPE_PKCS8 CKK_NSS_PKCS8 +++#define CKA_NETSCAPE_URL CKA_NSS_URL +++#define CKA_NETSCAPE_EMAIL CKA_NSS_EMAIL +++#define CKA_NETSCAPE_SMIME_INFO CKA_NSS_SMIME_INFO +++#define CKA_NETSCAPE_SMIME_TIMESTAMP CKA_NSS_SMIME_TIMESTAMP +++#define CKA_NETSCAPE_PKCS8_SALT CKA_NSS_PKCS8_SALT +++#define CKA_NETSCAPE_PASSWORD_CHECK CKA_NSS_PASSWORD_CHECK +++#define CKA_NETSCAPE_EXPIRES CKA_NSS_EXPIRES +++#define CKA_NETSCAPE_KRL CKA_NSS_KRL +++#define CKA_NETSCAPE_PQG_COUNTER CKA_NSS_PQG_COUNTER +++#define CKA_NETSCAPE_PQG_SEED CKA_NSS_PQG_SEED +++#define CKA_NETSCAPE_PQG_H CKA_NSS_PQG_H +++#define CKA_NETSCAPE_PQG_SEED_BITS CKA_NSS_PQG_SEED_BITS +++#define CKA_NETSCAPE_MODULE_SPEC CKA_NSS_MODULE_SPEC +++#define CKA_NETSCAPE_DB CKA_NSS_DB +++#define CKA_NETSCAPE_TRUST CKA_NSS_TRUST +++#define CKM_NETSCAPE_AES_KEY_WRAP CKM_NSS_AES_KEY_WRAP +++#define CKM_NETSCAPE_AES_KEY_WRAP_PAD CKM_NSS_AES_KEY_WRAP_PAD +++#define CKM_NETSCAPE_PBE_SHA1_DES_CBC CKM_NSS_PBE_SHA1_DES_CBC +++#define CKM_NETSCAPE_PBE_SHA1_TRIPLE_DES_CBC CKM_NSS_PBE_SHA1_TRIPLE_DES_CBC +++#define CKM_NETSCAPE_PBE_SHA1_40_BIT_RC2_CBC CKM_NSS_PBE_SHA1_40_BIT_RC2_CBC +++#define CKM_NETSCAPE_PBE_SHA1_128_BIT_RC2_CBC CKM_NSS_PBE_SHA1_128_BIT_RC2_CBC +++#define CKM_NETSCAPE_PBE_SHA1_40_BIT_RC4 CKM_NSS_PBE_SHA1_40_BIT_RC4 +++#define CKM_NETSCAPE_PBE_SHA1_128_BIT_RC4 CKM_NSS_PBE_SHA1_128_BIT_RC4 +++#define CKM_NETSCAPE_PBE_SHA1_FAULTY_3DES_CBC CKM_NSS_PBE_SHA1_FAULTY_3DES_CBC +++#define CKM_NETSCAPE_PBE_SHA1_HMAC_KEY_GEN CKM_NSS_PBE_SHA1_HMAC_KEY_GEN +++#define CKM_NETSCAPE_PBE_MD5_HMAC_KEY_GEN CKM_NSS_PBE_MD5_HMAC_KEY_GEN +++#define CKM_NETSCAPE_PBE_MD2_HMAC_KEY_GEN CKM_NSS_PBE_MD2_HMAC_KEY_GEN +++#define CKR_NETSCAPE_CERTDB_FAILED CKR_NSS_CERTDB_FAILED +++#define CKR_NETSCAPE_KEYDB_FAILED CKR_NSS_KEYDB_FAILED +++ +++#define CKT_NETSCAPE_TRUSTED CKT_NSS_TRUSTED +++#define CKT_NETSCAPE_TRUSTED_DELEGATOR CKT_NSS_TRUSTED_DELEGATOR +++#define CKT_NETSCAPE_UNTRUSTED CKT_NSS_UNTRUSTED +++#define CKT_NETSCAPE_MUST_VERIFY CKT_NSS_MUST_VERIFY +++#define CKT_NETSCAPE_TRUST_UNKNOWN CKT_NSS_TRUST_UNKNOWN +++#define CKT_NETSCAPE_VALID CKT_NSS_VALID +++#define CKT_NETSCAPE_VALID_DELEGATOR CKT_NSS_VALID_DELEGATOR +++#else +++/* use the new CK_GCM_PARAMS if NSS_PKCS11_2_0_COMPAT is not defined */ +++typedef struct CK_GCM_PARAMS_V3 CK_GCM_PARAMS; +++typedef 
CK_GCM_PARAMS_V3 CK_PTR CK_GCM_PARAMS_PTR; +++#endif +++ +++#endif /* _PKCS11N_H_ */ ++--- a/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11t.h +++++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11t.h ++@@ -1,1150 +1,1265 @@ ++-/* Copyright (c) OASIS Open 2016, 2019. All Rights Reserved./ ++- * /Distributed under the terms of the OASIS IPR Policy, ++- * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY ++- * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A ++- * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. ++- */ ++- ++-/* Latest version of the specification: ++- * http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html ++- */ ++- ++-/* See top of pkcs11.h for information about the macros that ++- * must be defined and the structure-packing conventions that ++- * must be set before including this file. +++/* This Source Code Form is subject to the terms of the Mozilla Public +++ * License, v. 2.0. If a copy of the MPL was not distributed with this +++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +++/* License to copy and use this software is granted provided that it is +++ * identified as "RSA Security Inc. PKCS #11 Cryptographic Token Interface +++ * (Cryptoki)" in all material mentioning or referencing this software. +++ +++ * License is also granted to make and use derivative works provided that +++ * such works are identified as "derived from the RSA Security Inc. PKCS #11 +++ * Cryptographic Token Interface (Cryptoki)" in all material mentioning or +++ * referencing the derived work. +++ +++ * RSA Security Inc. makes no representations concerning either the +++ * merchantability of this software or the suitability of this software for +++ * any particular purpose. It is provided "as is" without express or implied +++ * warranty of any kind. 
++ */ ++ ++ #ifndef _PKCS11T_H_ ++ #define _PKCS11T_H_ 1 ++ ++-#define CRYPTOKI_VERSION_MAJOR 3 ++-#define CRYPTOKI_VERSION_MINOR 0 ++-#define CRYPTOKI_VERSION_AMENDMENT 0 ++- ++-#define CK_TRUE 1 ++-#define CK_FALSE 0 ++- ++-#ifndef CK_DISABLE_TRUE_FALSE ++-#ifndef FALSE ++-#define FALSE CK_FALSE ++-#endif ++-#ifndef TRUE ++-#define TRUE CK_TRUE ++-#endif +++#define CK_TRUE 1 +++#define CK_FALSE 0 +++ +++#include "prtypes.h" +++ +++#define CK_PTR * +++#define CK_NULL_PTR 0 +++#define CK_CALLBACK_FUNCTION(rtype, func) rtype(PR_CALLBACK *func) +++#define CK_DECLARE_FUNCTION(rtype, func) extern rtype func +++#define CK_DECLARE_FUNCTION_POINTER(rtype, func) rtype(PR_CALLBACK *func) +++ +++#ifdef NSS_PCKS11_2_0_COMPAT +++#define prfHashMechanism prfMechanism ++ #endif ++ +++#define CRYPTOKI_VERSION_MAJOR 3 +++#define CRYPTOKI_VERSION_MINOR 0 +++#define CRYPTOKI_VERSION_AMENDMENT 0 +++ ++ /* an unsigned 8-bit value */ ++-typedef unsigned char CK_BYTE; +++typedef unsigned char CK_BYTE; ++ ++ /* an unsigned 8-bit character */ ++-typedef CK_BYTE CK_CHAR; +++typedef CK_BYTE CK_CHAR; ++ ++ /* an 8-bit UTF-8 character */ ++-typedef CK_BYTE CK_UTF8CHAR; +++typedef CK_BYTE CK_UTF8CHAR; ++ ++ /* a BYTE-sized Boolean flag */ ++-typedef CK_BYTE CK_BBOOL; +++typedef CK_BYTE CK_BBOOL; ++ ++ /* an unsigned value, at least 32 bits long */ ++ typedef unsigned long int CK_ULONG; ++ ++ /* a signed value, the same size as a CK_ULONG */ ++-typedef long int CK_LONG; +++/* CK_LONG is new for v2.0 */ +++typedef long int CK_LONG; ++ ++ /* at least 32 bits; each bit is a Boolean flag */ ++-typedef CK_ULONG CK_FLAGS; ++- +++typedef CK_ULONG CK_FLAGS; ++ ++ /* some special values for certain CK_ULONG variables */ ++-#define CK_UNAVAILABLE_INFORMATION (~0UL) ++-#define CK_EFFECTIVELY_INFINITE 0UL ++- +++#define CK_UNAVAILABLE_INFORMATION (~0UL) +++#define CK_EFFECTIVELY_INFINITE 0 ++ ++-typedef CK_BYTE CK_PTR CK_BYTE_PTR; ++-typedef CK_CHAR CK_PTR CK_CHAR_PTR; ++-typedef CK_UTF8CHAR CK_PTR CK_UTF8CHAR_PTR; ++-typedef CK_ULONG CK_PTR CK_ULONG_PTR; ++-typedef void CK_PTR CK_VOID_PTR; +++typedef CK_BYTE CK_PTR CK_BYTE_PTR; +++typedef CK_CHAR CK_PTR CK_CHAR_PTR; +++typedef CK_UTF8CHAR CK_PTR CK_UTF8CHAR_PTR; +++typedef CK_ULONG CK_PTR CK_ULONG_PTR; +++typedef void CK_PTR CK_VOID_PTR; ++ ++ /* Pointer to a CK_VOID_PTR-- i.e., pointer to pointer to void */ ++ typedef CK_VOID_PTR CK_PTR CK_VOID_PTR_PTR; ++ +++/* The following value is always invalid if used as a session */ +++/* handle or object handle */ +++#define CK_INVALID_HANDLE 0 ++ ++-/* The following value is always invalid if used as a session ++- * handle or object handle ++- */ ++-#define CK_INVALID_HANDLE 0UL ++- +++/* pack */ +++// #include "pkcs11p.h" ++ ++ typedef struct CK_VERSION { ++- CK_BYTE major; /* integer portion of version number */ ++- CK_BYTE minor; /* 1/100ths portion of version number */ +++ CK_BYTE major; /* integer portion of version number */ +++ CK_BYTE minor; /* 1/100ths portion of version number */ ++ } CK_VERSION; ++ ++ typedef CK_VERSION CK_PTR CK_VERSION_PTR; ++ ++- ++ typedef struct CK_INFO { ++- CK_VERSION cryptokiVersion; /* Cryptoki interface ver */ ++- CK_UTF8CHAR manufacturerID[32]; /* blank padded */ ++- CK_FLAGS flags; /* must be zero */ +++ /* manufacturerID and libraryDecription have been changed from +++ * CK_CHAR to CK_UTF8CHAR for v2.10 */ +++ CK_VERSION cryptokiVersion; /* PKCS #11 interface ver */ +++ CK_UTF8CHAR manufacturerID[32]; /* blank padded */ +++ CK_FLAGS flags; /* must be zero */ +++ +++ /* libraryDescription and 
libraryVersion are new for v2.0 */ ++ CK_UTF8CHAR libraryDescription[32]; /* blank padded */ ++- CK_VERSION libraryVersion; /* version of library */ +++ CK_VERSION libraryVersion; /* version of library */ ++ } CK_INFO; ++ ++-typedef CK_INFO CK_PTR CK_INFO_PTR; ++- +++typedef CK_INFO CK_PTR CK_INFO_PTR; ++ ++ /* CK_NOTIFICATION enumerates the types of notifications that ++- * Cryptoki provides to an application ++- */ +++ * PKCS #11 provides to an application */ +++/* CK_NOTIFICATION has been changed from an enum to a CK_ULONG +++ * for v2.0 */ ++ typedef CK_ULONG CK_NOTIFICATION; ++-#define CKN_SURRENDER 0UL ++-#define CKN_OTP_CHANGED 1UL +++#define CKN_SURRENDER 0 ++ ++-typedef CK_ULONG CK_SLOT_ID; +++typedef CK_ULONG CK_SLOT_ID; ++ ++ typedef CK_SLOT_ID CK_PTR CK_SLOT_ID_PTR; ++ ++- ++ /* CK_SLOT_INFO provides information about a slot */ ++ typedef struct CK_SLOT_INFO { ++- CK_UTF8CHAR slotDescription[64]; /* blank padded */ ++- CK_UTF8CHAR manufacturerID[32]; /* blank padded */ ++- CK_FLAGS flags; +++ /* slotDescription and manufacturerID have been changed from +++ * CK_CHAR to CK_UTF8CHAR for v2.10 */ +++ CK_UTF8CHAR slotDescription[64]; /* blank padded */ +++ CK_UTF8CHAR manufacturerID[32]; /* blank padded */ +++ CK_FLAGS flags; ++ ++- CK_VERSION hardwareVersion; /* version of hardware */ ++- CK_VERSION firmwareVersion; /* version of firmware */ +++ /* hardwareVersion and firmwareVersion are new for v2.0 */ +++ CK_VERSION hardwareVersion; /* version of hardware */ +++ CK_VERSION firmwareVersion; /* version of firmware */ ++ } CK_SLOT_INFO; ++ ++ /* flags: bit flags that provide capabilities of the slot ++ * Bit Flag Mask Meaning ++ */ ++-#define CKF_TOKEN_PRESENT 0x00000001UL /* a token is there */ ++-#define CKF_REMOVABLE_DEVICE 0x00000002UL /* removable devices*/ ++-#define CKF_HW_SLOT 0x00000004UL /* hardware slot */ +++#define CKF_TOKEN_PRESENT 0x00000001UL /* a token is there */ +++#define CKF_REMOVABLE_DEVICE 0x00000002UL /* removable devices*/ +++#define CKF_HW_SLOT 0x00000004UL /* hardware slot */ ++ ++ typedef CK_SLOT_INFO CK_PTR CK_SLOT_INFO_PTR; ++ ++- ++ /* CK_TOKEN_INFO provides information about a token */ ++ typedef struct CK_TOKEN_INFO { ++- CK_UTF8CHAR label[32]; /* blank padded */ ++- CK_UTF8CHAR manufacturerID[32]; /* blank padded */ ++- CK_UTF8CHAR model[16]; /* blank padded */ ++- CK_CHAR serialNumber[16]; /* blank padded */ ++- CK_FLAGS flags; /* see below */ ++- ++- CK_ULONG ulMaxSessionCount; /* max open sessions */ ++- CK_ULONG ulSessionCount; /* sess. now open */ ++- CK_ULONG ulMaxRwSessionCount; /* max R/W sessions */ ++- CK_ULONG ulRwSessionCount; /* R/W sess. 
now open */ ++- CK_ULONG ulMaxPinLen; /* in bytes */ ++- CK_ULONG ulMinPinLen; /* in bytes */ ++- CK_ULONG ulTotalPublicMemory; /* in bytes */ ++- CK_ULONG ulFreePublicMemory; /* in bytes */ ++- CK_ULONG ulTotalPrivateMemory; /* in bytes */ ++- CK_ULONG ulFreePrivateMemory; /* in bytes */ ++- CK_VERSION hardwareVersion; /* version of hardware */ ++- CK_VERSION firmwareVersion; /* version of firmware */ ++- CK_CHAR utcTime[16]; /* time */ +++ /* label, manufacturerID, and model have been changed from +++ * CK_CHAR to CK_UTF8CHAR for v2.10 */ +++ CK_UTF8CHAR label[32]; /* blank padded */ +++ CK_UTF8CHAR manufacturerID[32]; /* blank padded */ +++ CK_UTF8CHAR model[16]; /* blank padded */ +++ CK_CHAR serialNumber[16]; /* blank padded */ +++ CK_FLAGS flags; /* see below */ +++ +++ /* ulMaxSessionCount, ulSessionCount, ulMaxRwSessionCount, +++ * ulRwSessionCount, ulMaxPinLen, and ulMinPinLen have all been +++ * changed from CK_USHORT to CK_ULONG for v2.0 */ +++ CK_ULONG ulMaxSessionCount; /* max open sessions */ +++ CK_ULONG ulSessionCount; /* sess. now open */ +++ CK_ULONG ulMaxRwSessionCount; /* max R/W sessions */ +++ CK_ULONG ulRwSessionCount; /* R/W sess. now open */ +++ CK_ULONG ulMaxPinLen; /* in bytes */ +++ CK_ULONG ulMinPinLen; /* in bytes */ +++ CK_ULONG ulTotalPublicMemory; /* in bytes */ +++ CK_ULONG ulFreePublicMemory; /* in bytes */ +++ CK_ULONG ulTotalPrivateMemory; /* in bytes */ +++ CK_ULONG ulFreePrivateMemory; /* in bytes */ +++ +++ /* hardwareVersion, firmwareVersion, and time are new for +++ * v2.0 */ +++ CK_VERSION hardwareVersion; /* version of hardware */ +++ CK_VERSION firmwareVersion; /* version of firmware */ +++ CK_CHAR utcTime[16]; /* time */ ++ } CK_TOKEN_INFO; ++ ++ /* The flags parameter is defined as follows: ++ * Bit Flag Mask Meaning ++ */ ++-#define CKF_RNG 0x00000001UL /* has random # generator */ ++-#define CKF_WRITE_PROTECTED 0x00000002UL /* token is write-protected */ ++-#define CKF_LOGIN_REQUIRED 0x00000004UL /* user must login */ ++-#define CKF_USER_PIN_INITIALIZED 0x00000008UL /* normal user's PIN is set */ +++#define CKF_RNG 0x00000001UL /* has random # \ +++ * generator */ +++#define CKF_WRITE_PROTECTED 0x00000002UL /* token is \ +++ * write- \ +++ * protected */ +++#define CKF_LOGIN_REQUIRED 0x00000004UL /* user must \ +++ * login */ +++#define CKF_USER_PIN_INITIALIZED 0x00000008UL /* normal user's \ +++ * PIN is set */ ++ ++-/* CKF_RESTORE_KEY_NOT_NEEDED. If it is set, +++/* CKF_RESTORE_KEY_NOT_NEEDED is new for v2.0. If it is set, ++ * that means that *every* time the state of cryptographic ++ * operations of a session is successfully saved, all keys ++- * needed to continue those operations are stored in the state ++- */ ++-#define CKF_RESTORE_KEY_NOT_NEEDED 0x00000020UL +++ * needed to continue those operations are stored in the state */ +++#define CKF_RESTORE_KEY_NOT_NEEDED 0x00000020UL ++ ++-/* CKF_CLOCK_ON_TOKEN. If it is set, that means +++/* CKF_CLOCK_ON_TOKEN is new for v2.0. If it is set, that means ++ * that the token has some sort of clock. The time on that ++- * clock is returned in the token info structure ++- */ ++-#define CKF_CLOCK_ON_TOKEN 0x00000040UL +++ * clock is returned in the token info structure */ +++#define CKF_CLOCK_ON_TOKEN 0x00000040UL ++ ++-/* CKF_PROTECTED_AUTHENTICATION_PATH. If it is +++/* CKF_PROTECTED_AUTHENTICATION_PATH is new for v2.0. 
If it is ++ * set, that means that there is some way for the user to login ++- * without sending a PIN through the Cryptoki library itself ++- */ +++ * without sending a PIN through the PKCS #11 library itself */ ++ #define CKF_PROTECTED_AUTHENTICATION_PATH 0x00000100UL ++ ++-/* CKF_DUAL_CRYPTO_OPERATIONS. If it is true, +++/* CKF_DUAL_CRYPTO_OPERATIONS is new for v2.0. If it is true, ++ * that means that a single session with the token can perform ++ * dual simultaneous cryptographic operations (digest and ++ * encrypt; decrypt and digest; sign and encrypt; and decrypt ++- * and sign) ++- */ ++-#define CKF_DUAL_CRYPTO_OPERATIONS 0x00000200UL +++ * and sign) */ +++#define CKF_DUAL_CRYPTO_OPERATIONS 0x00000200UL ++ ++-/* CKF_TOKEN_INITIALIZED. If it is true, the +++/* CKF_TOKEN_INITIALIZED is new for v2.10. If it is true, the ++ * token has been initialized using C_InitializeToken or an ++ * equivalent mechanism outside the scope of PKCS #11. ++ * Calling C_InitializeToken when this flag is set will cause ++- * the token to be reinitialized. ++- */ ++-#define CKF_TOKEN_INITIALIZED 0x00000400UL +++ * the token to be reinitialized. */ +++#define CKF_TOKEN_INITIALIZED 0x00000400UL ++ ++-/* CKF_SECONDARY_AUTHENTICATION. If it is +++/* CKF_SECONDARY_AUTHENTICATION is new for v2.10. If it is ++ * true, the token supports secondary authentication for ++- * private key objects. ++- */ ++-#define CKF_SECONDARY_AUTHENTICATION 0x00000800UL +++ * private key objects. This flag is deprecated in v2.11 and +++ onwards. */ +++#define CKF_SECONDARY_AUTHENTICATION 0x00000800UL ++ ++-/* CKF_USER_PIN_COUNT_LOW. If it is true, an +++/* CKF_USER_PIN_COUNT_LOW is new for v2.10. If it is true, an ++ * incorrect user login PIN has been entered at least once ++- * since the last successful authentication. ++- */ ++-#define CKF_USER_PIN_COUNT_LOW 0x00010000UL +++ * since the last successful authentication. */ +++#define CKF_USER_PIN_COUNT_LOW 0x00010000UL ++ ++-/* CKF_USER_PIN_FINAL_TRY. If it is true, ++- * supplying an incorrect user PIN will it to become locked. ++- */ ++-#define CKF_USER_PIN_FINAL_TRY 0x00020000UL +++/* CKF_USER_PIN_FINAL_TRY is new for v2.10. If it is true, +++ * supplying an incorrect user PIN will cause it to become locked. */ +++#define CKF_USER_PIN_FINAL_TRY 0x00020000UL ++ ++-/* CKF_USER_PIN_LOCKED. If it is true, the +++/* CKF_USER_PIN_LOCKED is new for v2.10. If it is true, the ++ * user PIN has been locked. User login to the token is not ++- * possible. ++- */ ++-#define CKF_USER_PIN_LOCKED 0x00040000UL +++ * possible. */ +++#define CKF_USER_PIN_LOCKED 0x00040000UL ++ ++-/* CKF_USER_PIN_TO_BE_CHANGED. If it is true, +++/* CKF_USER_PIN_TO_BE_CHANGED is new for v2.10. If it is true, ++ * the user PIN value is the default value set by token ++ * initialization or manufacturing, or the PIN has been ++- * expired by the card. ++- */ ++-#define CKF_USER_PIN_TO_BE_CHANGED 0x00080000UL +++ * expired by the card. */ +++#define CKF_USER_PIN_TO_BE_CHANGED 0x00080000UL ++ ++-/* CKF_SO_PIN_COUNT_LOW. If it is true, an +++/* CKF_SO_PIN_COUNT_LOW is new for v2.10. If it is true, an ++ * incorrect SO login PIN has been entered at least once since ++- * the last successful authentication. ++- */ ++-#define CKF_SO_PIN_COUNT_LOW 0x00100000UL +++ * the last successful authentication. */ +++#define CKF_SO_PIN_COUNT_LOW 0x00100000UL ++ ++-/* CKF_SO_PIN_FINAL_TRY. If it is true, ++- * supplying an incorrect SO PIN will it to become locked. 
++- */ ++-#define CKF_SO_PIN_FINAL_TRY 0x00200000UL +++/* CKF_SO_PIN_FINAL_TRY is new for v2.10. If it is true, +++ * supplying an incorrect SO PIN will cause it to become locked. */ +++#define CKF_SO_PIN_FINAL_TRY 0x00200000UL ++ ++-/* CKF_SO_PIN_LOCKED. If it is true, the SO +++/* CKF_SO_PIN_LOCKED is new for v2.10. If it is true, the SO ++ * PIN has been locked. SO login to the token is not possible. ++ */ ++-#define CKF_SO_PIN_LOCKED 0x00400000UL +++#define CKF_SO_PIN_LOCKED 0x00400000UL ++ ++-/* CKF_SO_PIN_TO_BE_CHANGED. If it is true, +++/* CKF_SO_PIN_TO_BE_CHANGED is new for v2.10. If it is true, ++ * the SO PIN value is the default value set by token ++ * initialization or manufacturing, or the PIN has been ++- * expired by the card. ++- */ ++-#define CKF_SO_PIN_TO_BE_CHANGED 0x00800000UL +++ * expired by the card. */ +++#define CKF_SO_PIN_TO_BE_CHANGED 0x00800000UL ++ ++-#define CKF_ERROR_STATE 0x01000000UL +++#define CKF_ERROR_STATE 0x01000000UL ++ ++ typedef CK_TOKEN_INFO CK_PTR CK_TOKEN_INFO_PTR; ++ ++- ++-/* CK_SESSION_HANDLE is a Cryptoki-assigned value that ++- * identifies a session ++- */ ++-typedef CK_ULONG CK_SESSION_HANDLE; +++/* CK_SESSION_HANDLE is a PKCS #11-assigned value that +++ * identifies a session */ +++typedef CK_ULONG CK_SESSION_HANDLE; ++ ++ typedef CK_SESSION_HANDLE CK_PTR CK_SESSION_HANDLE_PTR; ++ ++- ++-/* CK_USER_TYPE enumerates the types of Cryptoki users */ ++-typedef CK_ULONG CK_USER_TYPE; +++/* CK_USER_TYPE enumerates the types of PKCS #11 users */ +++/* CK_USER_TYPE has been changed from an enum to a CK_ULONG for +++ * v2.0 */ +++typedef CK_ULONG CK_USER_TYPE; ++ /* Security Officer */ ++-#define CKU_SO 0UL +++#define CKU_SO 0 ++ /* Normal user */ ++-#define CKU_USER 1UL ++-/* Context specific */ ++-#define CKU_CONTEXT_SPECIFIC 2UL +++#define CKU_USER 1 +++/* Context specific (added in v2.20) */ +++#define CKU_CONTEXT_SPECIFIC 2 ++ ++ /* CK_STATE enumerates the session states */ ++-typedef CK_ULONG CK_STATE; ++-#define CKS_RO_PUBLIC_SESSION 0UL ++-#define CKS_RO_USER_FUNCTIONS 1UL ++-#define CKS_RW_PUBLIC_SESSION 2UL ++-#define CKS_RW_USER_FUNCTIONS 3UL ++-#define CKS_RW_SO_FUNCTIONS 4UL +++/* CK_STATE has been changed from an enum to a CK_ULONG for +++ * v2.0 */ +++typedef CK_ULONG CK_STATE; +++#define CKS_RO_PUBLIC_SESSION 0 +++#define CKS_RO_USER_FUNCTIONS 1 +++#define CKS_RW_PUBLIC_SESSION 2 +++#define CKS_RW_USER_FUNCTIONS 3 +++#define CKS_RW_SO_FUNCTIONS 4 ++ ++ /* CK_SESSION_INFO provides information about a session */ ++ typedef struct CK_SESSION_INFO { ++ CK_SLOT_ID slotID; ++- CK_STATE state; ++- CK_FLAGS flags; /* see below */ ++- CK_ULONG ulDeviceError; /* device-dependent error code */ +++ CK_STATE state; +++ CK_FLAGS flags; /* see below */ +++ +++ /* ulDeviceError was changed from CK_USHORT to CK_ULONG for +++ * v2.0 */ +++ CK_ULONG ulDeviceError; /* device-dependent error code */ ++ } CK_SESSION_INFO; ++ ++ /* The flags are defined in the following table: ++ * Bit Flag Mask Meaning ++ */ ++-#define CKF_RW_SESSION 0x00000002UL /* session is r/w */ ++-#define CKF_SERIAL_SESSION 0x00000004UL /* no parallel */ +++#define CKF_RW_SESSION 0x00000002UL /* session is r/w */ +++#define CKF_SERIAL_SESSION 0x00000004UL /* no parallel */ ++ ++ typedef CK_SESSION_INFO CK_PTR CK_SESSION_INFO_PTR; ++ ++- ++ /* CK_OBJECT_HANDLE is a token-specific identifier for an ++- * object ++- */ ++-typedef CK_ULONG CK_OBJECT_HANDLE; +++ * object */ +++typedef CK_ULONG CK_OBJECT_HANDLE; ++ ++ typedef CK_OBJECT_HANDLE CK_PTR CK_OBJECT_HANDLE_PTR; ++ ++- ++ /* 
CK_OBJECT_CLASS is a value that identifies the classes (or ++- * types) of objects that Cryptoki recognizes. It is defined ++- * as follows: ++- */ ++-typedef CK_ULONG CK_OBJECT_CLASS; +++ * types) of objects that PKCS #11 recognizes. It is defined +++ * as follows: */ +++/* CK_OBJECT_CLASS was changed from CK_USHORT to CK_ULONG for +++ * v2.0 */ +++typedef CK_ULONG CK_OBJECT_CLASS; ++ ++ /* The following classes of objects are defined: */ ++-#define CKO_DATA 0x00000000UL ++-#define CKO_CERTIFICATE 0x00000001UL ++-#define CKO_PUBLIC_KEY 0x00000002UL ++-#define CKO_PRIVATE_KEY 0x00000003UL ++-#define CKO_SECRET_KEY 0x00000004UL ++-#define CKO_HW_FEATURE 0x00000005UL +++/* CKO_HW_FEATURE is new for v2.10 */ +++/* CKO_DOMAIN_PARAMETERS is new for v2.11 */ +++/* CKO_MECHANISM is new for v2.20 */ +++/* CKO_PROFILE is new for v3.00 */ +++#define CKO_DATA 0x00000000UL +++#define CKO_CERTIFICATE 0x00000001UL +++#define CKO_PUBLIC_KEY 0x00000002UL +++#define CKO_PRIVATE_KEY 0x00000003UL +++#define CKO_SECRET_KEY 0x00000004UL +++#define CKO_HW_FEATURE 0x00000005UL ++ #define CKO_DOMAIN_PARAMETERS 0x00000006UL ++-#define CKO_MECHANISM 0x00000007UL ++-#define CKO_OTP_KEY 0x00000008UL ++-#define CKO_PROFILE 0x00000009UL ++- ++-#define CKO_VENDOR_DEFINED 0x80000000UL +++#define CKO_MECHANISM 0x00000007UL +++#define CKO_PROFILE 0x00000009UL +++#define CKO_VENDOR_DEFINED 0x80000000UL ++ ++ typedef CK_OBJECT_CLASS CK_PTR CK_OBJECT_CLASS_PTR; ++ +++/* CK_PROFILE_ID is new for v3.00. CK_PROFILE_ID is a value that +++ * identifies the profile that the token supports. */ +++typedef CK_ULONG CK_PROFILE_ID; +++ ++ /* Profile ID's */ ++-#define CKP_INVALID_ID 0x00000000UL ++-#define CKP_BASELINE_PROVIDER 0x00000001UL ++-#define CKP_EXTENDED_PROVIDER 0x00000002UL ++-#define CKP_AUTHENTICATION_TOKEN 0x00000003UL +++#define CKP_INVALID_ID 0x00000000UL +++#define CKP_BASELINE_PROVIDER 0x00000001UL +++#define CKP_EXTENDED_PROVIDER 0x00000002UL +++#define CKP_AUTHENTICATION_TOKEN 0x00000003UL ++ #define CKP_PUBLIC_CERTIFICATES_TOKEN 0x00000004UL ++-#define CKP_VENDOR_DEFINED 0x80000000UL +++#define CKP_VENDOR_DEFINED 0x80000000UL ++ ++-/* CK_HW_FEATURE_TYPE is a value that identifies the hardware feature type ++- * of an object with CK_OBJECT_CLASS equal to CKO_HW_FEATURE. ++- */ ++-typedef CK_ULONG CK_HW_FEATURE_TYPE; +++/* CK_HW_FEATURE_TYPE is new for v2.10. CK_HW_FEATURE_TYPE is a +++ * value that identifies the hardware feature type of an object +++ * with CK_OBJECT_CLASS equal to CKO_HW_FEATURE. 
*/ +++typedef CK_ULONG CK_HW_FEATURE_TYPE; ++ ++ /* The following hardware feature types are defined */ ++-#define CKH_MONOTONIC_COUNTER 0x00000001UL ++-#define CKH_CLOCK 0x00000002UL ++-#define CKH_USER_INTERFACE 0x00000003UL ++-#define CKH_VENDOR_DEFINED 0x80000000UL +++/* CKH_USER_INTERFACE is new for v2.20 */ +++#define CKH_MONOTONIC_COUNTER 0x00000001UL +++#define CKH_CLOCK 0x00000002UL +++#define CKH_USER_INTERFACE 0x00000003UL +++#define CKH_VENDOR_DEFINED 0x80000000UL ++ ++ /* CK_KEY_TYPE is a value that identifies a key type */ ++-typedef CK_ULONG CK_KEY_TYPE; +++/* CK_KEY_TYPE was changed from CK_USHORT to CK_ULONG for v2.0 */ +++typedef CK_ULONG CK_KEY_TYPE; ++ ++ /* the following key types are defined: */ ++-#define CKK_RSA 0x00000000UL ++-#define CKK_DSA 0x00000001UL ++-#define CKK_DH 0x00000002UL ++-#define CKK_ECDSA 0x00000003UL /* Deprecated */ ++-#define CKK_EC 0x00000003UL ++-#define CKK_X9_42_DH 0x00000004UL ++-#define CKK_KEA 0x00000005UL ++-#define CKK_GENERIC_SECRET 0x00000010UL ++-#define CKK_RC2 0x00000011UL ++-#define CKK_RC4 0x00000012UL ++-#define CKK_DES 0x00000013UL ++-#define CKK_DES2 0x00000014UL ++-#define CKK_DES3 0x00000015UL ++-#define CKK_CAST 0x00000016UL ++-#define CKK_CAST3 0x00000017UL ++-#define CKK_CAST5 0x00000018UL /* Deprecated */ ++-#define CKK_CAST128 0x00000018UL ++-#define CKK_RC5 0x00000019UL ++-#define CKK_IDEA 0x0000001AUL ++-#define CKK_SKIPJACK 0x0000001BUL ++-#define CKK_BATON 0x0000001CUL ++-#define CKK_JUNIPER 0x0000001DUL ++-#define CKK_CDMF 0x0000001EUL ++-#define CKK_AES 0x0000001FUL ++-#define CKK_BLOWFISH 0x00000020UL ++-#define CKK_TWOFISH 0x00000021UL ++-#define CKK_SECURID 0x00000022UL ++-#define CKK_HOTP 0x00000023UL ++-#define CKK_ACTI 0x00000024UL ++-#define CKK_CAMELLIA 0x00000025UL ++-#define CKK_ARIA 0x00000026UL ++- ++-/* the following definitions were added in the 2.30 header file, ++- * but never defined in the spec. */ ++-#define CKK_MD5_HMAC 0x00000027UL ++-#define CKK_SHA_1_HMAC 0x00000028UL ++-#define CKK_RIPEMD128_HMAC 0x00000029UL ++-#define CKK_RIPEMD160_HMAC 0x0000002AUL ++-#define CKK_SHA256_HMAC 0x0000002BUL ++-#define CKK_SHA384_HMAC 0x0000002CUL ++-#define CKK_SHA512_HMAC 0x0000002DUL ++-#define CKK_SHA224_HMAC 0x0000002EUL ++- ++-#define CKK_SEED 0x0000002FUL ++-#define CKK_GOSTR3410 0x00000030UL ++-#define CKK_GOSTR3411 0x00000031UL ++-#define CKK_GOST28147 0x00000032UL ++-#define CKK_CHACHA20 0x00000033UL ++-#define CKK_POLY1305 0x00000034UL ++-#define CKK_AES_XTS 0x00000035UL ++-#define CKK_SHA3_224_HMAC 0x00000036UL ++-#define CKK_SHA3_256_HMAC 0x00000037UL ++-#define CKK_SHA3_384_HMAC 0x00000038UL ++-#define CKK_SHA3_512_HMAC 0x00000039UL ++-#define CKK_BLAKE2B_160_HMAC 0x0000003aUL ++-#define CKK_BLAKE2B_256_HMAC 0x0000003bUL ++-#define CKK_BLAKE2B_384_HMAC 0x0000003cUL ++-#define CKK_BLAKE2B_512_HMAC 0x0000003dUL ++-#define CKK_SALSA20 0x0000003eUL ++-#define CKK_X2RATCHET 0x0000003fUL ++-#define CKK_EC_EDWARDS 0x00000040UL ++-#define CKK_EC_MONTGOMERY 0x00000041UL ++-#define CKK_HKDF 0x00000042UL ++-#define CKK_SHA512_224_HMAC 0x00000043UL ++-#define CKK_SHA512_256_HMAC 0x00000044UL ++-#define CKK_SHA512_T_HMAC 0x00000045UL ++- ++-#define CKK_VENDOR_DEFINED 0x80000000UL +++#define CKK_RSA 0x00000000UL +++#define CKK_DSA 0x00000001UL +++#define CKK_DH 0x00000002UL +++ +++/* CKK_ECDSA and CKK_KEA are new for v2.0 */ +++/* CKK_ECDSA is deprecated in v2.11, CKK_EC is preferred. 
*/ +++#define CKK_ECDSA 0x00000003UL +++#define CKK_EC 0x00000003UL +++#define CKK_X9_42_DH 0x00000004UL +++#define CKK_KEA 0x00000005UL +++ +++#define CKK_GENERIC_SECRET 0x00000010UL +++#define CKK_RC2 0x00000011UL +++#define CKK_RC4 0x00000012UL +++#define CKK_DES 0x00000013UL +++#define CKK_DES2 0x00000014UL +++#define CKK_DES3 0x00000015UL +++ +++/* all these key types are new for v2.0 */ +++#define CKK_CAST 0x00000016UL +++#define CKK_CAST3 0x00000017UL +++/* CKK_CAST5 is deprecated in v2.11, CKK_CAST128 is preferred. */ +++#define CKK_CAST5 0x00000018UL +++#define CKK_CAST128 0x00000018UL +++#define CKK_RC5 0x00000019UL +++#define CKK_IDEA 0x0000001AUL +++#define CKK_SKIPJACK 0x0000001BUL +++#define CKK_BATON 0x0000001CUL +++#define CKK_JUNIPER 0x0000001DUL +++#define CKK_CDMF 0x0000001EUL +++#define CKK_AES 0x0000001FUL +++ +++/* BlowFish and TwoFish are new for v2.20 */ +++#define CKK_BLOWFISH 0x00000020UL +++#define CKK_TWOFISH 0x00000021UL +++ +++/* Camellia is proposed for v2.20 Amendment 3 */ +++#define CKK_CAMELLIA 0x00000025UL +++ +++#define CKK_SEED 0x0000002FUL /* was 2A */ +++ +++/* added in v2.30 */ +++#define CKK_ARIA 0x00000026UL +++ +++/* added in 2.40 */ +++#define CKK_MD5_HMAC 0x00000027UL +++#define CKK_SHA_1_HMAC 0x00000028UL +++#define CKK_RIPEMD128_HMAC 0x00000029UL +++#define CKK_RIPEMD160_HMAC 0x0000002AUL +++#define CKK_SHA256_HMAC 0x0000002BUL +++#define CKK_SHA384_HMAC 0x0000002CUL +++#define CKK_SHA512_HMAC 0x0000002DUL +++#define CKK_SHA224_HMAC 0x0000002EUL +++#define CKK_GOSTR3410 0x00000030UL +++#define CKK_GOSTR3411 0x00000031UL +++#define CKK_GOST28147 0x00000032UL +++#define CKK_CHACHA20 0x00000033UL +++#define CKK_POLY1305 0x00000034UL +++#define CKK_AES_XTS 0x00000035UL +++#define CKK_SHA3_224_HMAC 0x00000036UL +++#define CKK_SHA3_256_HMAC 0x00000037UL +++#define CKK_SHA3_384_HMAC 0x00000038UL +++#define CKK_SHA3_512_HMAC 0x00000039UL +++ +++/* added in 3.0 */ +++#define CKK_BLAKE2B_160_HMAC 0x0000003aUL +++#define CKK_BLAKE2B_256_HMAC 0x0000003bUL +++#define CKK_BLAKE2B_384_HMAC 0x0000003cUL +++#define CKK_BLAKE2B_512_HMAC 0x0000003dUL +++#define CKK_SALSA20 0x0000003eUL +++#define CKK_X2RATCHET 0x0000003fUL +++#define CKK_EC_EDWARDS 0x00000040UL +++#define CKK_EC_MONTGOMERY 0x00000041UL +++#define CKK_HKDF 0x00000042UL +++#define CKK_SHA512_224_HMAC 0x00000043UL +++#define CKK_SHA512_256_HMAC 0x00000044UL +++#define CKK_SHA512_T_HMAC 0x00000045UL ++ +++#define CKK_VENDOR_DEFINED 0x80000000UL ++ ++ /* CK_CERTIFICATE_TYPE is a value that identifies a certificate ++- * type ++- */ ++-typedef CK_ULONG CK_CERTIFICATE_TYPE; ++- ++-#define CK_CERTIFICATE_CATEGORY_UNSPECIFIED 0UL ++-#define CK_CERTIFICATE_CATEGORY_TOKEN_USER 1UL ++-#define CK_CERTIFICATE_CATEGORY_AUTHORITY 2UL ++-#define CK_CERTIFICATE_CATEGORY_OTHER_ENTITY 3UL ++- ++-#define CK_SECURITY_DOMAIN_UNSPECIFIED 0UL ++-#define CK_SECURITY_DOMAIN_MANUFACTURER 1UL ++-#define CK_SECURITY_DOMAIN_OPERATOR 2UL ++-#define CK_SECURITY_DOMAIN_THIRD_PARTY 3UL ++- +++ * type */ +++/* CK_CERTIFICATE_TYPE was changed from CK_USHORT to CK_ULONG +++ * for v2.0 */ +++typedef CK_ULONG CK_CERTIFICATE_TYPE; ++ ++ /* The following certificate types are defined: */ ++-#define CKC_X_509 0x00000000UL ++-#define CKC_X_509_ATTR_CERT 0x00000001UL ++-#define CKC_WTLS 0x00000002UL ++-#define CKC_VENDOR_DEFINED 0x80000000UL ++- +++/* CKC_X_509_ATTR_CERT is new for v2.10 */ +++/* CKC_WTLS is new for v2.20 */ +++#define CKC_X_509 0x00000000UL +++#define CKC_X_509_ATTR_CERT 0x00000001UL +++#define CKC_WTLS 0x00000002UL 
+++#define CKC_VENDOR_DEFINED 0x80000000UL ++ ++ /* CK_ATTRIBUTE_TYPE is a value that identifies an attribute ++- * type ++- */ ++-typedef CK_ULONG CK_ATTRIBUTE_TYPE; +++ * type */ +++/* CK_ATTRIBUTE_TYPE was changed from CK_USHORT to CK_ULONG for +++ * v2.0 */ +++typedef CK_ULONG CK_ATTRIBUTE_TYPE; ++ ++-/* The CKF_ARRAY_ATTRIBUTE flag identifies an attribute which ++- * consists of an array of values. ++- */ ++-#define CKF_ARRAY_ATTRIBUTE 0x40000000UL +++/* values for CKA_CERTIFICATE_CATEGORY v2.20 */ +++typedef CK_ULONG CK_CERTIFICATE_CATEGORY; +++#define CK_CERTIFICATE_CATEGORY_UNSPECIFIED 0UL +++#define CK_CERTIFICATE_CATEGORY_TOKEN_USER 1UL +++#define CK_CERTIFICATE_CATEGORY_AUTHORITY 2UL +++#define CK_CERTIFICATE_CATEGORY_OTHER_ENTITY 3UL ++ ++-/* The following OTP-related defines relate to the CKA_OTP_FORMAT attribute */ ++-#define CK_OTP_FORMAT_DECIMAL 0UL ++-#define CK_OTP_FORMAT_HEXADECIMAL 1UL ++-#define CK_OTP_FORMAT_ALPHANUMERIC 2UL ++-#define CK_OTP_FORMAT_BINARY 3UL +++/* values for CKA_JAVA_MIDP_SECURITY_DOMAIN v2.20 */ +++typedef CK_ULONG CK_JAVA_MIDP_SECURITY_DOMAIN; +++#define CK_SECURITY_DOMAIN_UNSPECIFIED 0UL +++#define CK_SECURITY_DOMAIN_MANUFACTURER 1UL +++#define CK_SECURITY_DOMAIN_OPERATOR 2UL +++#define CK_SECURITY_DOMAIN_THIRD_PARTY 3UL +++ +++/* values for CKA_OTP_FORMAT */ +++#define CK_OTP_FORMAT_DECIMAL 0UL +++#define CK_OTP_FORMAT_HEXADECIMAL 1UL +++#define CK_OTP_FORMAT_ALPHANUMERIC 2UL +++#define CK_OTP_FORMAT_BINARY 3UL +++ +++/* values for CKA_OTP_CHALLENGE_REQUIREMENT, CKA_OTP_TIME_REQUIREMENT, +++ * CKA_OTP_COUNTER_REQUIREMENT, CKA_OTP_PIN_REQUIREMENT */ +++#define CK_OTP_PARAM_IGNORED 0UL +++#define CK_OTP_PARAM_OPTIONAL 1UL +++#define CK_OTP_PARAM_MANDATORY 2UL ++ ++-/* The following OTP-related defines relate to the CKA_OTP_..._REQUIREMENT ++- * attributes ++- */ ++-#define CK_OTP_PARAM_IGNORED 0UL ++-#define CK_OTP_PARAM_OPTIONAL 1UL ++-#define CK_OTP_PARAM_MANDATORY 2UL +++/* The CKF_ARRAY_ATTRIBUTE flag identifies an attribute which +++ consists of an array of values. 
*/ +++#define CKF_ARRAY_ATTRIBUTE 0x40000000UL ++ ++ /* The following attribute types are defined: */ ++-#define CKA_CLASS 0x00000000UL ++-#define CKA_TOKEN 0x00000001UL ++-#define CKA_PRIVATE 0x00000002UL ++-#define CKA_LABEL 0x00000003UL ++-#define CKA_UNIQUE_ID 0x00000004UL ++-#define CKA_APPLICATION 0x00000010UL ++-#define CKA_VALUE 0x00000011UL ++-#define CKA_OBJECT_ID 0x00000012UL ++-#define CKA_CERTIFICATE_TYPE 0x00000080UL ++-#define CKA_ISSUER 0x00000081UL ++-#define CKA_SERIAL_NUMBER 0x00000082UL ++-#define CKA_AC_ISSUER 0x00000083UL ++-#define CKA_OWNER 0x00000084UL ++-#define CKA_ATTR_TYPES 0x00000085UL ++-#define CKA_TRUSTED 0x00000086UL ++-#define CKA_CERTIFICATE_CATEGORY 0x00000087UL ++-#define CKA_JAVA_MIDP_SECURITY_DOMAIN 0x00000088UL ++-#define CKA_URL 0x00000089UL ++-#define CKA_HASH_OF_SUBJECT_PUBLIC_KEY 0x0000008AUL ++-#define CKA_HASH_OF_ISSUER_PUBLIC_KEY 0x0000008BUL ++-#define CKA_NAME_HASH_ALGORITHM 0x0000008CUL ++-#define CKA_CHECK_VALUE 0x00000090UL ++- ++-#define CKA_KEY_TYPE 0x00000100UL ++-#define CKA_SUBJECT 0x00000101UL ++-#define CKA_ID 0x00000102UL ++-#define CKA_SENSITIVE 0x00000103UL ++-#define CKA_ENCRYPT 0x00000104UL ++-#define CKA_DECRYPT 0x00000105UL ++-#define CKA_WRAP 0x00000106UL ++-#define CKA_UNWRAP 0x00000107UL ++-#define CKA_SIGN 0x00000108UL ++-#define CKA_SIGN_RECOVER 0x00000109UL ++-#define CKA_VERIFY 0x0000010AUL ++-#define CKA_VERIFY_RECOVER 0x0000010BUL ++-#define CKA_DERIVE 0x0000010CUL ++-#define CKA_START_DATE 0x00000110UL ++-#define CKA_END_DATE 0x00000111UL ++-#define CKA_MODULUS 0x00000120UL ++-#define CKA_MODULUS_BITS 0x00000121UL ++-#define CKA_PUBLIC_EXPONENT 0x00000122UL ++-#define CKA_PRIVATE_EXPONENT 0x00000123UL ++-#define CKA_PRIME_1 0x00000124UL ++-#define CKA_PRIME_2 0x00000125UL ++-#define CKA_EXPONENT_1 0x00000126UL ++-#define CKA_EXPONENT_2 0x00000127UL ++-#define CKA_COEFFICIENT 0x00000128UL ++-#define CKA_PUBLIC_KEY_INFO 0x00000129UL ++-#define CKA_PRIME 0x00000130UL ++-#define CKA_SUBPRIME 0x00000131UL ++-#define CKA_BASE 0x00000132UL ++- ++-#define CKA_PRIME_BITS 0x00000133UL ++-#define CKA_SUBPRIME_BITS 0x00000134UL ++-#define CKA_SUB_PRIME_BITS CKA_SUBPRIME_BITS ++- ++-#define CKA_VALUE_BITS 0x00000160UL ++-#define CKA_VALUE_LEN 0x00000161UL ++-#define CKA_EXTRACTABLE 0x00000162UL ++-#define CKA_LOCAL 0x00000163UL ++-#define CKA_NEVER_EXTRACTABLE 0x00000164UL ++-#define CKA_ALWAYS_SENSITIVE 0x00000165UL ++-#define CKA_KEY_GEN_MECHANISM 0x00000166UL ++- ++-#define CKA_MODIFIABLE 0x00000170UL ++-#define CKA_COPYABLE 0x00000171UL ++- ++-#define CKA_DESTROYABLE 0x00000172UL ++- ++-#define CKA_ECDSA_PARAMS 0x00000180UL /* Deprecated */ ++-#define CKA_EC_PARAMS 0x00000180UL ++- ++-#define CKA_EC_POINT 0x00000181UL ++- ++-#define CKA_SECONDARY_AUTH 0x00000200UL /* Deprecated */ ++-#define CKA_AUTH_PIN_FLAGS 0x00000201UL /* Deprecated */ ++- ++-#define CKA_ALWAYS_AUTHENTICATE 0x00000202UL ++- ++-#define CKA_WRAP_WITH_TRUSTED 0x00000210UL ++-#define CKA_WRAP_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000211UL) ++-#define CKA_UNWRAP_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000212UL) ++-#define CKA_DERIVE_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000213UL) ++- ++-#define CKA_OTP_FORMAT 0x00000220UL ++-#define CKA_OTP_LENGTH 0x00000221UL ++-#define CKA_OTP_TIME_INTERVAL 0x00000222UL ++-#define CKA_OTP_USER_FRIENDLY_MODE 0x00000223UL +++#define CKA_CLASS 0x00000000UL +++#define CKA_TOKEN 0x00000001UL +++#define CKA_PRIVATE 0x00000002UL +++#define CKA_LABEL 0x00000003UL +++#define CKA_APPLICATION 0x00000010UL +++#define CKA_VALUE 0x00000011UL +++ 
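These CKA_* attribute types are only ever consumed through CK_ATTRIBUTE templates (the struct itself is typedef'd further down in this header). A minimal sketch of that pattern, assuming a Cryptoki function list `p11` from C_GetFunctionList and an open session handle `hSession` obtained elsewhere; the helper name is hypothetical:

    #include "pkcs11.h"   /* assumed wrapper header that pulls in these definitions */

    /* Sketch: locate a token-resident secret key labelled "demo".
     * *phKey is meaningful only when *pulFound == 1. */
    static CK_RV find_demo_key(CK_FUNCTION_LIST_PTR p11,
                               CK_SESSION_HANDLE hSession,
                               CK_OBJECT_HANDLE *phKey,
                               CK_ULONG *pulFound)
    {
        CK_OBJECT_CLASS cls = CKO_SECRET_KEY;  /* object classes are defined earlier in this header */
        CK_BBOOL onToken = CK_TRUE;
        CK_UTF8CHAR label[] = "demo";
        CK_ATTRIBUTE tmpl[] = {                /* {type, pValue, ulValueLen} */
            { CKA_CLASS, &cls,     sizeof(cls) },
            { CKA_TOKEN, &onToken, sizeof(onToken) },
            { CKA_LABEL, label,    sizeof(label) - 1 },  /* length excludes the NUL */
        };
        CK_RV rv = p11->C_FindObjectsInit(hSession, tmpl,
                                          (CK_ULONG)(sizeof(tmpl) / sizeof(tmpl[0])));
        if (rv != CKR_OK)
            return rv;
        rv = p11->C_FindObjects(hSession, phKey, 1, pulFound);
        (void)p11->C_FindObjectsFinal(hSession);  /* always end the search operation */
        return rv;
    }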
+++/* CKA_OBJECT_ID is new for v2.10 */ +++#define CKA_OBJECT_ID 0x00000012UL +++ +++#define CKA_CERTIFICATE_TYPE 0x00000080UL +++#define CKA_ISSUER 0x00000081UL +++#define CKA_SERIAL_NUMBER 0x00000082UL +++ +++/* CKA_AC_ISSUER, CKA_OWNER, and CKA_ATTR_TYPES are new +++ * for v2.10 */ +++#define CKA_AC_ISSUER 0x00000083UL +++#define CKA_OWNER 0x00000084UL +++#define CKA_ATTR_TYPES 0x00000085UL +++ +++/* CKA_TRUSTED is new for v2.11 */ +++#define CKA_TRUSTED 0x00000086UL +++ +++/* CKA_CERTIFICATE_CATEGORY ... +++ * CKA_CHECK_VALUE are new for v2.20 */ +++#define CKA_CERTIFICATE_CATEGORY 0x00000087UL +++#define CKA_JAVA_MIDP_SECURITY_DOMAIN 0x00000088UL +++#define CKA_URL 0x00000089UL +++#define CKA_HASH_OF_SUBJECT_PUBLIC_KEY 0x0000008AUL +++#define CKA_HASH_OF_ISSUER_PUBLIC_KEY 0x0000008BUL +++#define CKA_CHECK_VALUE 0x00000090UL +++ +++#define CKA_KEY_TYPE 0x00000100UL +++#define CKA_SUBJECT 0x00000101UL +++#define CKA_ID 0x00000102UL +++#define CKA_SENSITIVE 0x00000103UL +++#define CKA_ENCRYPT 0x00000104UL +++#define CKA_DECRYPT 0x00000105UL +++#define CKA_WRAP 0x00000106UL +++#define CKA_UNWRAP 0x00000107UL +++#define CKA_SIGN 0x00000108UL +++#define CKA_SIGN_RECOVER 0x00000109UL +++#define CKA_VERIFY 0x0000010AUL +++#define CKA_VERIFY_RECOVER 0x0000010BUL +++#define CKA_DERIVE 0x0000010CUL +++#define CKA_START_DATE 0x00000110UL +++#define CKA_END_DATE 0x00000111UL +++#define CKA_MODULUS 0x00000120UL +++#define CKA_MODULUS_BITS 0x00000121UL +++#define CKA_PUBLIC_EXPONENT 0x00000122UL +++#define CKA_PRIVATE_EXPONENT 0x00000123UL +++#define CKA_PRIME_1 0x00000124UL +++#define CKA_PRIME_2 0x00000125UL +++#define CKA_EXPONENT_1 0x00000126UL +++#define CKA_EXPONENT_2 0x00000127UL +++#define CKA_COEFFICIENT 0x00000128UL +++/* CKA_PUBLIC_KEY_INFO is new for v2.40 */ +++#define CKA_PUBLIC_KEY_INFO 0x00000129UL +++#define CKA_PRIME 0x00000130UL +++#define CKA_SUBPRIME 0x00000131UL +++#define CKA_BASE 0x00000132UL +++ +++/* CKA_PRIME_BITS and CKA_SUB_PRIME_BITS are new for v2.11 */ +++#define CKA_PRIME_BITS 0x00000133UL +++#define CKA_SUBPRIME_BITS 0x00000134UL +++#define CKA_SUB_PRIME_BITS CKA_SUBPRIME_BITS +++/* (To retain backwards-compatibility) */ +++ +++#define CKA_VALUE_BITS 0x00000160UL +++#define CKA_VALUE_LEN 0x00000161UL +++ +++/* CKA_EXTRACTABLE, CKA_LOCAL, CKA_NEVER_EXTRACTABLE, +++ * CKA_ALWAYS_SENSITIVE, CKA_MODIFIABLE, CKA_ECDSA_PARAMS, +++ * and CKA_EC_POINT are new for v2.0 */ +++#define CKA_EXTRACTABLE 0x00000162UL +++#define CKA_LOCAL 0x00000163UL +++#define CKA_NEVER_EXTRACTABLE 0x00000164UL +++#define CKA_ALWAYS_SENSITIVE 0x00000165UL +++ +++/* CKA_KEY_GEN_MECHANISM is new for v2.11 */ +++#define CKA_KEY_GEN_MECHANISM 0x00000166UL +++ +++#define CKA_MODIFIABLE 0x00000170UL +++ +++/* New for 2.40 */ +++#define CKA_COPYABLE 0x00000171UL +++#define CKA_DESTROYABLE 0x00000172UL +++ +++/* CKA_ECDSA_PARAMS is deprecated in v2.11, +++ * CKA_EC_PARAMS is preferred. */ +++#define CKA_ECDSA_PARAMS 0x00000180UL +++#define CKA_EC_PARAMS 0x00000180UL +++ +++#define CKA_EC_POINT 0x00000181UL +++ +++/* CKA_SECONDARY_AUTH, CKA_AUTH_PIN_FLAGS, +++ * are new for v2.10. Deprecated in v2.11 and onwards. */ +++#define CKA_SECONDARY_AUTH 0x00000200UL +++#define CKA_AUTH_PIN_FLAGS 0x00000201UL +++ +++/* CKA_ALWAYS_AUTHENTICATE ... 
+++ * CKA_UNWRAP_TEMPLATE are new for v2.20 */ +++#define CKA_ALWAYS_AUTHENTICATE 0x00000202UL +++ +++#define CKA_WRAP_WITH_TRUSTED 0x00000210UL +++#define CKA_WRAP_TEMPLATE (CKF_ARRAY_ATTRIBUTE | 0x00000211UL) +++#define CKA_UNWRAP_TEMPLATE (CKF_ARRAY_ATTRIBUTE | 0x00000212UL) +++ +++/* new for 2.40 */ +++#define CKA_DERIVE_TEMPLATE (CKF_ARRAY_ATTRIBUTE | 0x00000213UL) +++#define CKA_OTP_FORMAT 0x00000220UL +++#define CKA_OTP_LENGTH 0x00000221UL +++#define CKA_OTP_TIME_INTERVAL 0x00000222UL +++#define CKA_OTP_USER_FRIENDLY_MODE 0x00000223UL ++ #define CKA_OTP_CHALLENGE_REQUIREMENT 0x00000224UL ++-#define CKA_OTP_TIME_REQUIREMENT 0x00000225UL ++-#define CKA_OTP_COUNTER_REQUIREMENT 0x00000226UL ++-#define CKA_OTP_PIN_REQUIREMENT 0x00000227UL ++-#define CKA_OTP_COUNTER 0x0000022EUL ++-#define CKA_OTP_TIME 0x0000022FUL ++-#define CKA_OTP_USER_IDENTIFIER 0x0000022AUL ++-#define CKA_OTP_SERVICE_IDENTIFIER 0x0000022BUL ++-#define CKA_OTP_SERVICE_LOGO 0x0000022CUL ++-#define CKA_OTP_SERVICE_LOGO_TYPE 0x0000022DUL ++- ++-#define CKA_GOSTR3410_PARAMS 0x00000250UL ++-#define CKA_GOSTR3411_PARAMS 0x00000251UL ++-#define CKA_GOST28147_PARAMS 0x00000252UL ++- ++-#define CKA_HW_FEATURE_TYPE 0x00000300UL ++-#define CKA_RESET_ON_INIT 0x00000301UL ++-#define CKA_HAS_RESET 0x00000302UL ++- ++-#define CKA_PIXEL_X 0x00000400UL ++-#define CKA_PIXEL_Y 0x00000401UL ++-#define CKA_RESOLUTION 0x00000402UL ++-#define CKA_CHAR_ROWS 0x00000403UL ++-#define CKA_CHAR_COLUMNS 0x00000404UL ++-#define CKA_COLOR 0x00000405UL ++-#define CKA_BITS_PER_PIXEL 0x00000406UL ++-#define CKA_CHAR_SETS 0x00000480UL ++-#define CKA_ENCODING_METHODS 0x00000481UL ++-#define CKA_MIME_TYPES 0x00000482UL ++-#define CKA_MECHANISM_TYPE 0x00000500UL ++-#define CKA_REQUIRED_CMS_ATTRIBUTES 0x00000501UL ++-#define CKA_DEFAULT_CMS_ATTRIBUTES 0x00000502UL ++-#define CKA_SUPPORTED_CMS_ATTRIBUTES 0x00000503UL ++-#define CKA_ALLOWED_MECHANISMS (CKF_ARRAY_ATTRIBUTE|0x00000600UL) ++-#define CKA_PROFILE_ID 0x00000601UL ++-#define CKA_X2RATCHET_BAG 0x00000602UL ++-#define CKA_X2RATCHET_BAGSIZE 0x00000603UL ++-#define CKA_X2RATCHET_BOBS1STMSG 0x00000604UL ++-#define CKA_X2RATCHET_CKR 0x00000605UL ++-#define CKA_X2RATCHET_CKS 0x00000606UL ++-#define CKA_X2RATCHET_DHP 0x00000607UL ++-#define CKA_X2RATCHET_DHR 0x00000608UL ++-#define CKA_X2RATCHET_DHS 0x00000609UL ++-#define CKA_X2RATCHET_HKR 0x0000060aUL ++-#define CKA_X2RATCHET_HKS 0x0000060bUL ++-#define CKA_X2RATCHET_ISALICE 0x0000060cUL ++-#define CKA_X2RATCHET_NHKR 0x0000060dUL ++-#define CKA_X2RATCHET_NHKS 0x0000060eUL ++-#define CKA_X2RATCHET_NR 0x0000060fUL ++-#define CKA_X2RATCHET_NS 0x00000610UL ++-#define CKA_X2RATCHET_PNS 0x00000611UL ++-#define CKA_X2RATCHET_RK 0x00000612UL +++#define CKA_OTP_TIME_REQUIREMENT 0x00000225UL +++#define CKA_OTP_COUNTER_REQUIREMENT 0x00000226UL +++#define CKA_OTP_PIN_REQUIREMENT 0x00000227UL +++#define CKA_OTP_COUNTER 0x0000022EUL +++#define CKA_OTP_TIME 0x0000022FUL +++#define CKA_OTP_USER_IDENTIFIER 0x0000022AUL +++#define CKA_OTP_SERVICE_IDENTIFIER 0x0000022BUL +++#define CKA_OTP_SERVICE_LOGO 0x0000022CUL +++#define CKA_OTP_SERVICE_LOGO_TYPE 0x0000022DUL +++#define CKA_GOSTR3410_PARAMS 0x00000250UL +++#define CKA_GOSTR3411_PARAMS 0x00000251UL +++#define CKA_GOST28147_PARAMS 0x00000252UL +++ +++/* CKA_HW_FEATURE_TYPE, CKA_RESET_ON_INIT, and CKA_HAS_RESET +++ * are new for v2.10 */ +++#define CKA_HW_FEATURE_TYPE 0x00000300UL +++#define CKA_RESET_ON_INIT 0x00000301UL +++#define CKA_HAS_RESET 0x00000302UL +++ +++/* The following attributes are new for v2.20 */ 
+++#define CKA_PIXEL_X 0x00000400UL +++#define CKA_PIXEL_Y 0x00000401UL +++#define CKA_RESOLUTION 0x00000402UL +++#define CKA_CHAR_ROWS 0x00000403UL +++#define CKA_CHAR_COLUMNS 0x00000404UL +++#define CKA_COLOR 0x00000405UL +++#define CKA_BITS_PER_PIXEL 0x00000406UL +++#define CKA_CHAR_SETS 0x00000480UL +++#define CKA_ENCODING_METHODS 0x00000481UL +++#define CKA_MIME_TYPES 0x00000482UL +++#define CKA_MECHANISM_TYPE 0x00000500UL +++#define CKA_REQUIRED_CMS_ATTRIBUTES 0x00000501UL +++#define CKA_DEFAULT_CMS_ATTRIBUTES 0x00000502UL +++#define CKA_SUPPORTED_CMS_ATTRIBUTES 0x00000503UL +++#define CKA_ALLOWED_MECHANISMS (CKF_ARRAY_ATTRIBUTE | 0x00000600UL) +++ +++/* new for v3.0 */ +++#define CKA_PROFILE_ID 0x00000601UL +++#define CKA_X2RATCHET_BAG 0x00000602UL +++#define CKA_X2RATCHET_BAGSIZE 0x00000603UL +++#define CKA_X2RATCHET_BOBS1STMSG 0x00000604UL +++#define CKA_X2RATCHET_CKR 0x00000605UL +++#define CKA_X2RATCHET_CKS 0x00000606UL +++#define CKA_X2RATCHET_DHP 0x00000607UL +++#define CKA_X2RATCHET_DHR 0x00000608UL +++#define CKA_X2RATCHET_DHS 0x00000609UL +++#define CKA_X2RATCHET_HKR 0x0000060aUL +++#define CKA_X2RATCHET_HKS 0x0000060bUL +++#define CKA_X2RATCHET_ISALICE 0x0000060cUL +++#define CKA_X2RATCHET_NHKR 0x0000060dUL +++#define CKA_X2RATCHET_NHKS 0x0000060eUL +++#define CKA_X2RATCHET_NR 0x0000060fUL +++#define CKA_X2RATCHET_NS 0x00000610UL +++#define CKA_X2RATCHET_PNS 0x00000611UL +++#define CKA_X2RATCHET_RK 0x00000612UL ++ ++-#define CKA_VENDOR_DEFINED 0x80000000UL +++#define CKA_VENDOR_DEFINED 0x80000000UL ++ ++ /* CK_ATTRIBUTE is a structure that includes the type, length ++- * and value of an attribute ++- */ +++ * and value of an attribute */ ++ typedef struct CK_ATTRIBUTE { ++ CK_ATTRIBUTE_TYPE type; ++- CK_VOID_PTR pValue; ++- CK_ULONG ulValueLen; /* in bytes */ +++ CK_VOID_PTR pValue; +++ +++ /* ulValueLen went from CK_USHORT to CK_ULONG for v2.0 */ +++ CK_ULONG ulValueLen; /* in bytes */ ++ } CK_ATTRIBUTE; ++ ++ typedef CK_ATTRIBUTE CK_PTR CK_ATTRIBUTE_PTR; ++ ++ /* CK_DATE is a structure that defines a date */ ++-typedef struct CK_DATE{ ++- CK_CHAR year[4]; /* the year ("1900" - "9999") */ ++- CK_CHAR month[2]; /* the month ("01" - "12") */ ++- CK_CHAR day[2]; /* the day ("01" - "31") */ +++typedef struct CK_DATE { +++ CK_CHAR year[4]; /* the year ("1900" - "9999") */ +++ CK_CHAR month[2]; /* the month ("01" - "12") */ +++ CK_CHAR day[2]; /* the day ("01" - "31") */ ++ } CK_DATE; ++ ++- ++ /* CK_MECHANISM_TYPE is a value that identifies a mechanism ++- * type ++- */ ++-typedef CK_ULONG CK_MECHANISM_TYPE; +++ * type */ +++/* CK_MECHANISM_TYPE was changed from CK_USHORT to CK_ULONG for +++ * v2.0 */ +++typedef CK_ULONG CK_MECHANISM_TYPE; ++ ++ /* the following mechanism types are defined: */ ++-#define CKM_RSA_PKCS_KEY_PAIR_GEN 0x00000000UL ++-#define CKM_RSA_PKCS 0x00000001UL ++-#define CKM_RSA_9796 0x00000002UL ++-#define CKM_RSA_X_509 0x00000003UL ++- ++-#define CKM_MD2_RSA_PKCS 0x00000004UL ++-#define CKM_MD5_RSA_PKCS 0x00000005UL ++-#define CKM_SHA1_RSA_PKCS 0x00000006UL ++- ++-#define CKM_RIPEMD128_RSA_PKCS 0x00000007UL ++-#define CKM_RIPEMD160_RSA_PKCS 0x00000008UL ++-#define CKM_RSA_PKCS_OAEP 0x00000009UL ++- ++-#define CKM_RSA_X9_31_KEY_PAIR_GEN 0x0000000AUL ++-#define CKM_RSA_X9_31 0x0000000BUL ++-#define CKM_SHA1_RSA_X9_31 0x0000000CUL ++-#define CKM_RSA_PKCS_PSS 0x0000000DUL ++-#define CKM_SHA1_RSA_PKCS_PSS 0x0000000EUL ++- ++-#define CKM_DSA_KEY_PAIR_GEN 0x00000010UL ++-#define CKM_DSA 0x00000011UL ++-#define CKM_DSA_SHA1 0x00000012UL ++-#define CKM_DSA_SHA224 
0x00000013UL ++-#define CKM_DSA_SHA256 0x00000014UL ++-#define CKM_DSA_SHA384 0x00000015UL ++-#define CKM_DSA_SHA512 0x00000016UL ++-#define CKM_DSA_SHA3_224 0x00000018UL ++-#define CKM_DSA_SHA3_256 0x00000019UL ++-#define CKM_DSA_SHA3_384 0x0000001AUL ++-#define CKM_DSA_SHA3_512 0x0000001BUL ++- ++-#define CKM_DH_PKCS_KEY_PAIR_GEN 0x00000020UL ++-#define CKM_DH_PKCS_DERIVE 0x00000021UL ++- ++-#define CKM_X9_42_DH_KEY_PAIR_GEN 0x00000030UL ++-#define CKM_X9_42_DH_DERIVE 0x00000031UL ++-#define CKM_X9_42_DH_HYBRID_DERIVE 0x00000032UL ++-#define CKM_X9_42_MQV_DERIVE 0x00000033UL ++- ++-#define CKM_SHA256_RSA_PKCS 0x00000040UL ++-#define CKM_SHA384_RSA_PKCS 0x00000041UL ++-#define CKM_SHA512_RSA_PKCS 0x00000042UL ++-#define CKM_SHA256_RSA_PKCS_PSS 0x00000043UL ++-#define CKM_SHA384_RSA_PKCS_PSS 0x00000044UL ++-#define CKM_SHA512_RSA_PKCS_PSS 0x00000045UL ++- ++-#define CKM_SHA224_RSA_PKCS 0x00000046UL ++-#define CKM_SHA224_RSA_PKCS_PSS 0x00000047UL ++- ++-#define CKM_SHA512_224 0x00000048UL ++-#define CKM_SHA512_224_HMAC 0x00000049UL ++-#define CKM_SHA512_224_HMAC_GENERAL 0x0000004AUL ++-#define CKM_SHA512_224_KEY_DERIVATION 0x0000004BUL ++-#define CKM_SHA512_256 0x0000004CUL ++-#define CKM_SHA512_256_HMAC 0x0000004DUL ++-#define CKM_SHA512_256_HMAC_GENERAL 0x0000004EUL ++-#define CKM_SHA512_256_KEY_DERIVATION 0x0000004FUL ++- ++-#define CKM_SHA512_T 0x00000050UL ++-#define CKM_SHA512_T_HMAC 0x00000051UL ++-#define CKM_SHA512_T_HMAC_GENERAL 0x00000052UL ++-#define CKM_SHA512_T_KEY_DERIVATION 0x00000053UL ++- ++-#define CKM_SHA3_256_RSA_PKCS 0x00000060UL ++-#define CKM_SHA3_384_RSA_PKCS 0x00000061UL ++-#define CKM_SHA3_512_RSA_PKCS 0x00000062UL ++-#define CKM_SHA3_256_RSA_PKCS_PSS 0x00000063UL ++-#define CKM_SHA3_384_RSA_PKCS_PSS 0x00000064UL ++-#define CKM_SHA3_512_RSA_PKCS_PSS 0x00000065UL ++-#define CKM_SHA3_224_RSA_PKCS 0x00000066UL ++-#define CKM_SHA3_224_RSA_PKCS_PSS 0x00000067UL ++- ++-#define CKM_RC2_KEY_GEN 0x00000100UL ++-#define CKM_RC2_ECB 0x00000101UL ++-#define CKM_RC2_CBC 0x00000102UL ++-#define CKM_RC2_MAC 0x00000103UL ++- ++-#define CKM_RC2_MAC_GENERAL 0x00000104UL ++-#define CKM_RC2_CBC_PAD 0x00000105UL ++- ++-#define CKM_RC4_KEY_GEN 0x00000110UL ++-#define CKM_RC4 0x00000111UL ++-#define CKM_DES_KEY_GEN 0x00000120UL ++-#define CKM_DES_ECB 0x00000121UL ++-#define CKM_DES_CBC 0x00000122UL ++-#define CKM_DES_MAC 0x00000123UL ++- ++-#define CKM_DES_MAC_GENERAL 0x00000124UL ++-#define CKM_DES_CBC_PAD 0x00000125UL ++- ++-#define CKM_DES2_KEY_GEN 0x00000130UL ++-#define CKM_DES3_KEY_GEN 0x00000131UL ++-#define CKM_DES3_ECB 0x00000132UL ++-#define CKM_DES3_CBC 0x00000133UL ++-#define CKM_DES3_MAC 0x00000134UL ++- ++-#define CKM_DES3_MAC_GENERAL 0x00000135UL ++-#define CKM_DES3_CBC_PAD 0x00000136UL ++-#define CKM_DES3_CMAC_GENERAL 0x00000137UL ++-#define CKM_DES3_CMAC 0x00000138UL ++-#define CKM_CDMF_KEY_GEN 0x00000140UL ++-#define CKM_CDMF_ECB 0x00000141UL ++-#define CKM_CDMF_CBC 0x00000142UL ++-#define CKM_CDMF_MAC 0x00000143UL ++-#define CKM_CDMF_MAC_GENERAL 0x00000144UL ++-#define CKM_CDMF_CBC_PAD 0x00000145UL ++- ++-#define CKM_DES_OFB64 0x00000150UL ++-#define CKM_DES_OFB8 0x00000151UL ++-#define CKM_DES_CFB64 0x00000152UL ++-#define CKM_DES_CFB8 0x00000153UL ++- ++-#define CKM_MD2 0x00000200UL ++- ++-#define CKM_MD2_HMAC 0x00000201UL ++-#define CKM_MD2_HMAC_GENERAL 0x00000202UL ++- ++-#define CKM_MD5 0x00000210UL ++- ++-#define CKM_MD5_HMAC 0x00000211UL ++-#define CKM_MD5_HMAC_GENERAL 0x00000212UL ++- ++-#define CKM_SHA_1 0x00000220UL ++- ++-#define CKM_SHA_1_HMAC 0x00000221UL 
++-#define CKM_SHA_1_HMAC_GENERAL 0x00000222UL ++- ++-#define CKM_RIPEMD128 0x00000230UL ++-#define CKM_RIPEMD128_HMAC 0x00000231UL ++-#define CKM_RIPEMD128_HMAC_GENERAL 0x00000232UL ++-#define CKM_RIPEMD160 0x00000240UL ++-#define CKM_RIPEMD160_HMAC 0x00000241UL ++-#define CKM_RIPEMD160_HMAC_GENERAL 0x00000242UL ++- ++-#define CKM_SHA256 0x00000250UL ++-#define CKM_SHA256_HMAC 0x00000251UL ++-#define CKM_SHA256_HMAC_GENERAL 0x00000252UL ++-#define CKM_SHA224 0x00000255UL ++-#define CKM_SHA224_HMAC 0x00000256UL ++-#define CKM_SHA224_HMAC_GENERAL 0x00000257UL ++-#define CKM_SHA384 0x00000260UL ++-#define CKM_SHA384_HMAC 0x00000261UL ++-#define CKM_SHA384_HMAC_GENERAL 0x00000262UL ++-#define CKM_SHA512 0x00000270UL ++-#define CKM_SHA512_HMAC 0x00000271UL ++-#define CKM_SHA512_HMAC_GENERAL 0x00000272UL ++-#define CKM_SECURID_KEY_GEN 0x00000280UL ++-#define CKM_SECURID 0x00000282UL ++-#define CKM_HOTP_KEY_GEN 0x00000290UL ++-#define CKM_HOTP 0x00000291UL ++-#define CKM_ACTI 0x000002A0UL ++-#define CKM_ACTI_KEY_GEN 0x000002A1UL ++- ++-#define CKM_SHA3_256 0x000002B0UL ++-#define CKM_SHA3_256_HMAC 0x000002B1UL ++-#define CKM_SHA3_256_HMAC_GENERAL 0x000002B2UL ++-#define CKM_SHA3_256_KEY_GEN 0x000002B3UL ++-#define CKM_SHA3_224 0x000002B5UL ++-#define CKM_SHA3_224_HMAC 0x000002B6UL ++-#define CKM_SHA3_224_HMAC_GENERAL 0x000002B7UL ++-#define CKM_SHA3_224_KEY_GEN 0x000002B8UL ++-#define CKM_SHA3_384 0x000002C0UL ++-#define CKM_SHA3_384_HMAC 0x000002C1UL ++-#define CKM_SHA3_384_HMAC_GENERAL 0x000002C2UL ++-#define CKM_SHA3_384_KEY_GEN 0x000002C3UL ++-#define CKM_SHA3_512 0x000002D0UL ++-#define CKM_SHA3_512_HMAC 0x000002D1UL ++-#define CKM_SHA3_512_HMAC_GENERAL 0x000002D2UL ++-#define CKM_SHA3_512_KEY_GEN 0x000002D3UL ++- ++- ++-#define CKM_CAST_KEY_GEN 0x00000300UL ++-#define CKM_CAST_ECB 0x00000301UL ++-#define CKM_CAST_CBC 0x00000302UL ++-#define CKM_CAST_MAC 0x00000303UL ++-#define CKM_CAST_MAC_GENERAL 0x00000304UL ++-#define CKM_CAST_CBC_PAD 0x00000305UL ++-#define CKM_CAST3_KEY_GEN 0x00000310UL ++-#define CKM_CAST3_ECB 0x00000311UL ++-#define CKM_CAST3_CBC 0x00000312UL ++-#define CKM_CAST3_MAC 0x00000313UL ++-#define CKM_CAST3_MAC_GENERAL 0x00000314UL ++-#define CKM_CAST3_CBC_PAD 0x00000315UL +++#define CKM_RSA_PKCS_KEY_PAIR_GEN 0x00000000UL +++#define CKM_RSA_PKCS 0x00000001UL +++#define CKM_RSA_9796 0x00000002UL +++#define CKM_RSA_X_509 0x00000003UL +++ +++/* CKM_MD2_RSA_PKCS, CKM_MD5_RSA_PKCS, and CKM_SHA1_RSA_PKCS +++ * are new for v2.0. 
They are mechanisms which hash and sign */ +++#define CKM_MD2_RSA_PKCS 0x00000004UL +++#define CKM_MD5_RSA_PKCS 0x00000005UL +++#define CKM_SHA1_RSA_PKCS 0x00000006UL +++ +++/* CKM_RIPEMD128_RSA_PKCS, CKM_RIPEMD160_RSA_PKCS, and +++ * CKM_RSA_PKCS_OAEP are new for v2.10 */ +++#define CKM_RIPEMD128_RSA_PKCS 0x00000007UL +++#define CKM_RIPEMD160_RSA_PKCS 0x00000008UL +++#define CKM_RSA_PKCS_OAEP 0x00000009UL +++ +++/* CKM_RSA_X9_31_KEY_PAIR_GEN, CKM_RSA_X9_31, CKM_SHA1_RSA_X9_31, +++ * CKM_RSA_PKCS_PSS, and CKM_SHA1_RSA_PKCS_PSS are new for v2.11 */ +++#define CKM_RSA_X9_31_KEY_PAIR_GEN 0x0000000AUL +++#define CKM_RSA_X9_31 0x0000000BUL +++#define CKM_SHA1_RSA_X9_31 0x0000000CUL +++#define CKM_RSA_PKCS_PSS 0x0000000DUL +++#define CKM_SHA1_RSA_PKCS_PSS 0x0000000EUL +++ +++#define CKM_DSA_KEY_PAIR_GEN 0x00000010UL +++#define CKM_DSA 0x00000011UL +++#define CKM_DSA_SHA1 0x00000012UL +++ +++/* new for v2.40 */ +++#define CKM_DSA_SHA224 0x00000013UL +++#define CKM_DSA_SHA256 0x00000014UL +++#define CKM_DSA_SHA384 0x00000015UL +++#define CKM_DSA_SHA512 0x00000016UL +++#define CKM_DSA_SHA3_224 0x00000018UL +++#define CKM_DSA_SHA3_256 0x00000019UL +++#define CKM_DSA_SHA3_384 0x0000001AUL +++#define CKM_DSA_SHA3_512 0x0000001BUL +++ +++#define CKM_DH_PKCS_KEY_PAIR_GEN 0x00000020UL +++#define CKM_DH_PKCS_DERIVE 0x00000021UL +++ +++/* CKM_X9_42_DH_KEY_PAIR_GEN, CKM_X9_42_DH_DERIVE, +++ * CKM_X9_42_DH_HYBRID_DERIVE, and CKM_X9_42_MQV_DERIVE are new for +++ * v2.11 */ +++#define CKM_X9_42_DH_KEY_PAIR_GEN 0x00000030UL +++#define CKM_X9_42_DH_DERIVE 0x00000031UL +++#define CKM_X9_42_DH_HYBRID_DERIVE 0x00000032UL +++#define CKM_X9_42_MQV_DERIVE 0x00000033UL +++ +++/* CKM_SHA256/384/512 are new for v2.20 */ +++#define CKM_SHA256_RSA_PKCS 0x00000040UL +++#define CKM_SHA384_RSA_PKCS 0x00000041UL +++#define CKM_SHA512_RSA_PKCS 0x00000042UL +++#define CKM_SHA256_RSA_PKCS_PSS 0x00000043UL +++#define CKM_SHA384_RSA_PKCS_PSS 0x00000044UL +++#define CKM_SHA512_RSA_PKCS_PSS 0x00000045UL +++ +++/* CKM_SHA224 new for v2.20 amendment 3 */ +++#define CKM_SHA224_RSA_PKCS 0x00000046UL +++#define CKM_SHA224_RSA_PKCS_PSS 0x00000047UL +++ +++/* new for v2.40 */ +++#define CKM_SHA512_224 0x00000048UL +++#define CKM_SHA512_224_HMAC 0x00000049UL +++#define CKM_SHA512_224_HMAC_GENERAL 0x0000004AUL +++#define CKM_SHA512_224_KEY_DERIVATION 0x0000004BUL +++#define CKM_SHA512_256 0x0000004CUL +++#define CKM_SHA512_256_HMAC 0x0000004DUL +++#define CKM_SHA512_256_HMAC_GENERAL 0x0000004EUL +++#define CKM_SHA512_256_KEY_DERIVATION 0x0000004FUL +++#define CKM_SHA512_T 0x00000050UL +++#define CKM_SHA512_T_HMAC 0x00000051UL +++#define CKM_SHA512_T_HMAC_GENERAL 0x00000052UL +++#define CKM_SHA512_T_KEY_DERIVATION 0x00000053UL +++#define CKM_SHA3_256_RSA_PKCS 0x00000060UL +++#define CKM_SHA3_384_RSA_PKCS 0x00000061UL +++#define CKM_SHA3_512_RSA_PKCS 0x00000062UL +++#define CKM_SHA3_256_RSA_PKCS_PSS 0x00000063UL +++#define CKM_SHA3_384_RSA_PKCS_PSS 0x00000064UL +++#define CKM_SHA3_512_RSA_PKCS_PSS 0x00000065UL +++#define CKM_SHA3_224_RSA_PKCS 0x00000066UL +++#define CKM_SHA3_224_RSA_PKCS_PSS 0x00000067UL +++ +++#define CKM_RC2_KEY_GEN 0x00000100UL +++#define CKM_RC2_ECB 0x00000101UL +++#define CKM_RC2_CBC 0x00000102UL +++#define CKM_RC2_MAC 0x00000103UL +++ +++/* CKM_RC2_MAC_GENERAL and CKM_RC2_CBC_PAD are new for v2.0 */ +++#define CKM_RC2_MAC_GENERAL 0x00000104UL +++#define CKM_RC2_CBC_PAD 0x00000105UL +++ +++#define CKM_RC4_KEY_GEN 0x00000110UL +++#define CKM_RC4 0x00000111UL +++#define CKM_DES_KEY_GEN 0x00000120UL +++#define CKM_DES_ECB 
0x00000121UL +++#define CKM_DES_CBC 0x00000122UL +++#define CKM_DES_MAC 0x00000123UL +++ +++/* CKM_DES_MAC_GENERAL and CKM_DES_CBC_PAD are new for v2.0 */ +++#define CKM_DES_MAC_GENERAL 0x00000124UL +++#define CKM_DES_CBC_PAD 0x00000125UL +++ +++#define CKM_DES2_KEY_GEN 0x00000130UL +++#define CKM_DES3_KEY_GEN 0x00000131UL +++#define CKM_DES3_ECB 0x00000132UL +++#define CKM_DES3_CBC 0x00000133UL +++#define CKM_DES3_MAC 0x00000134UL +++ +++/* CKM_DES3_MAC_GENERAL, CKM_DES3_CBC_PAD, CKM_CDMF_KEY_GEN, +++ * CKM_CDMF_ECB, CKM_CDMF_CBC, CKM_CDMF_MAC, +++ * CKM_CDMF_MAC_GENERAL, and CKM_CDMF_CBC_PAD are new for v2.0 */ +++#define CKM_DES3_MAC_GENERAL 0x00000135UL +++#define CKM_DES3_CBC_PAD 0x00000136UL +++#define CKM_CDMF_KEY_GEN 0x00000140UL +++#define CKM_CDMF_ECB 0x00000141UL +++#define CKM_CDMF_CBC 0x00000142UL +++#define CKM_CDMF_MAC 0x00000143UL +++#define CKM_CDMF_MAC_GENERAL 0x00000144UL +++#define CKM_CDMF_CBC_PAD 0x00000145UL +++ +++/* the following four DES mechanisms are new for v2.20 */ +++#define CKM_DES_OFB64 0x00000150UL +++#define CKM_DES_OFB8 0x00000151UL +++#define CKM_DES_CFB64 0x00000152UL +++#define CKM_DES_CFB8 0x00000153UL +++ +++#define CKM_MD2 0x00000200UL +++ +++/* CKM_MD2_HMAC and CKM_MD2_HMAC_GENERAL are new for v2.0 */ +++#define CKM_MD2_HMAC 0x00000201UL +++#define CKM_MD2_HMAC_GENERAL 0x00000202UL +++ +++#define CKM_MD5 0x00000210UL +++ +++/* CKM_MD5_HMAC and CKM_MD5_HMAC_GENERAL are new for v2.0 */ +++#define CKM_MD5_HMAC 0x00000211UL +++#define CKM_MD5_HMAC_GENERAL 0x00000212UL +++ +++#define CKM_SHA_1 0x00000220UL +++ +++/* CKM_SHA_1_HMAC and CKM_SHA_1_HMAC_GENERAL are new for v2.0 */ +++#define CKM_SHA_1_HMAC 0x00000221UL +++#define CKM_SHA_1_HMAC_GENERAL 0x00000222UL +++ +++/* CKM_RIPEMD128, CKM_RIPEMD128_HMAC, +++ * CKM_RIPEMD128_HMAC_GENERAL, CKM_RIPEMD160, CKM_RIPEMD160_HMAC, +++ * and CKM_RIPEMD160_HMAC_GENERAL are new for v2.10 */ +++#define CKM_RIPEMD128 0x00000230UL +++#define CKM_RIPEMD128_HMAC 0x00000231UL +++#define CKM_RIPEMD128_HMAC_GENERAL 0x00000232UL +++#define CKM_RIPEMD160 0x00000240UL +++#define CKM_RIPEMD160_HMAC 0x00000241UL +++#define CKM_RIPEMD160_HMAC_GENERAL 0x00000242UL +++ +++/* CKM_SHA256/384/512 are new for v2.20 */ +++#define CKM_SHA256 0x00000250UL +++#define CKM_SHA256_HMAC 0x00000251UL +++#define CKM_SHA256_HMAC_GENERAL 0x00000252UL +++#define CKM_SHA384 0x00000260UL +++#define CKM_SHA384_HMAC 0x00000261UL +++#define CKM_SHA384_HMAC_GENERAL 0x00000262UL +++#define CKM_SHA512 0x00000270UL +++#define CKM_SHA512_HMAC 0x00000271UL +++#define CKM_SHA512_HMAC_GENERAL 0x00000272UL +++ +++/* CKM_SHA224 new for v2.20 amendment 3 */ +++#define CKM_SHA224 0x00000255UL +++#define CKM_SHA224_HMAC 0x00000256UL +++#define CKM_SHA224_HMAC_GENERAL 0x00000257UL +++ +++/* new for v2.40 */ +++#define CKM_SECURID_KEY_GEN 0x00000280UL +++#define CKM_SECURID 0x00000282UL +++#define CKM_HOTP_KEY_GEN 0x00000290UL +++#define CKM_HOTP 0x00000291UL +++#define CKM_ACTI 0x000002A0UL +++#define CKM_ACTI_KEY_GEN 0x000002A1UL +++#define CKM_SHA3_256 0x000002B0UL +++#define CKM_SHA3_256_HMAC 0x000002B1UL +++#define CKM_SHA3_256_HMAC_GENERAL 0x000002B2UL +++#define CKM_SHA3_256_KEY_GEN 0x000002B3UL +++#define CKM_SHA3_224 0x000002B5UL +++#define CKM_SHA3_224_HMAC 0x000002B6UL +++#define CKM_SHA3_224_HMAC_GENERAL 0x000002B7UL +++#define CKM_SHA3_224_KEY_GEN 0x000002B8UL +++#define CKM_SHA3_384 0x000002C0UL +++#define CKM_SHA3_384_HMAC 0x000002C1UL +++#define CKM_SHA3_384_HMAC_GENERAL 0x000002C2UL +++#define CKM_SHA3_384_KEY_GEN 0x000002C3UL +++#define 
CKM_SHA3_512 0x000002D0UL +++#define CKM_SHA3_512_HMAC 0x000002D1UL +++#define CKM_SHA3_512_HMAC_GENERAL 0x000002D2UL +++#define CKM_SHA3_512_KEY_GEN 0x000002D3UL +++ +++/* All of the following mechanisms are new for v2.0 */ ++ /* Note that CAST128 and CAST5 are the same algorithm */ ++-#define CKM_CAST5_KEY_GEN 0x00000320UL ++-#define CKM_CAST128_KEY_GEN 0x00000320UL ++-#define CKM_CAST5_ECB 0x00000321UL ++-#define CKM_CAST128_ECB 0x00000321UL ++-#define CKM_CAST5_CBC 0x00000322UL /* Deprecated */ ++-#define CKM_CAST128_CBC 0x00000322UL ++-#define CKM_CAST5_MAC 0x00000323UL /* Deprecated */ ++-#define CKM_CAST128_MAC 0x00000323UL ++-#define CKM_CAST5_MAC_GENERAL 0x00000324UL /* Deprecated */ ++-#define CKM_CAST128_MAC_GENERAL 0x00000324UL ++-#define CKM_CAST5_CBC_PAD 0x00000325UL /* Deprecated */ ++-#define CKM_CAST128_CBC_PAD 0x00000325UL ++-#define CKM_RC5_KEY_GEN 0x00000330UL ++-#define CKM_RC5_ECB 0x00000331UL ++-#define CKM_RC5_CBC 0x00000332UL ++-#define CKM_RC5_MAC 0x00000333UL ++-#define CKM_RC5_MAC_GENERAL 0x00000334UL ++-#define CKM_RC5_CBC_PAD 0x00000335UL ++-#define CKM_IDEA_KEY_GEN 0x00000340UL ++-#define CKM_IDEA_ECB 0x00000341UL ++-#define CKM_IDEA_CBC 0x00000342UL ++-#define CKM_IDEA_MAC 0x00000343UL ++-#define CKM_IDEA_MAC_GENERAL 0x00000344UL ++-#define CKM_IDEA_CBC_PAD 0x00000345UL ++-#define CKM_GENERIC_SECRET_KEY_GEN 0x00000350UL ++-#define CKM_CONCATENATE_BASE_AND_KEY 0x00000360UL ++-#define CKM_CONCATENATE_BASE_AND_DATA 0x00000362UL ++-#define CKM_CONCATENATE_DATA_AND_BASE 0x00000363UL ++-#define CKM_XOR_BASE_AND_DATA 0x00000364UL ++-#define CKM_EXTRACT_KEY_FROM_KEY 0x00000365UL ++-#define CKM_SSL3_PRE_MASTER_KEY_GEN 0x00000370UL ++-#define CKM_SSL3_MASTER_KEY_DERIVE 0x00000371UL ++-#define CKM_SSL3_KEY_AND_MAC_DERIVE 0x00000372UL ++- ++-#define CKM_SSL3_MASTER_KEY_DERIVE_DH 0x00000373UL ++-#define CKM_TLS_PRE_MASTER_KEY_GEN 0x00000374UL ++-#define CKM_TLS_MASTER_KEY_DERIVE 0x00000375UL ++-#define CKM_TLS_KEY_AND_MAC_DERIVE 0x00000376UL ++-#define CKM_TLS_MASTER_KEY_DERIVE_DH 0x00000377UL ++- ++-#define CKM_TLS_PRF 0x00000378UL ++- ++-#define CKM_SSL3_MD5_MAC 0x00000380UL ++-#define CKM_SSL3_SHA1_MAC 0x00000381UL ++-#define CKM_MD5_KEY_DERIVATION 0x00000390UL ++-#define CKM_MD2_KEY_DERIVATION 0x00000391UL ++-#define CKM_SHA1_KEY_DERIVATION 0x00000392UL ++- ++-#define CKM_SHA256_KEY_DERIVATION 0x00000393UL ++-#define CKM_SHA384_KEY_DERIVATION 0x00000394UL ++-#define CKM_SHA512_KEY_DERIVATION 0x00000395UL ++-#define CKM_SHA224_KEY_DERIVATION 0x00000396UL ++-#define CKM_SHA3_256_KEY_DERIVATION 0x00000397UL ++-#define CKM_SHA3_224_KEY_DERIVATION 0x00000398UL ++-#define CKM_SHA3_384_KEY_DERIVATION 0x00000399UL ++-#define CKM_SHA3_512_KEY_DERIVATION 0x0000039AUL ++-#define CKM_SHAKE_128_KEY_DERIVATION 0x0000039BUL ++-#define CKM_SHAKE_256_KEY_DERIVATION 0x0000039CUL ++-#define CKM_SHA3_256_KEY_DERIVE CKM_SHA3_256_KEY_DERIVATION ++-#define CKM_SHA3_224_KEY_DERIVE CKM_SHA3_224_KEY_DERIVATION ++-#define CKM_SHA3_384_KEY_DERIVE CKM_SHA3_384_KEY_DERIVATION ++-#define CKM_SHA3_512_KEY_DERIVE CKM_SHA3_512_KEY_DERIVATION ++-#define CKM_SHAKE_128_KEY_DERIVE CKM_SHAKE_128_KEY_DERIVATION ++-#define CKM_SHAKE_256_KEY_DERIVE CKM_SHAKE_256_KEY_DERIVATION ++- ++-#define CKM_PBE_MD2_DES_CBC 0x000003A0UL ++-#define CKM_PBE_MD5_DES_CBC 0x000003A1UL ++-#define CKM_PBE_MD5_CAST_CBC 0x000003A2UL ++-#define CKM_PBE_MD5_CAST3_CBC 0x000003A3UL ++-#define CKM_PBE_MD5_CAST5_CBC 0x000003A4UL /* Deprecated */ ++-#define CKM_PBE_MD5_CAST128_CBC 0x000003A4UL ++-#define CKM_PBE_SHA1_CAST5_CBC 
0x000003A5UL /* Deprecated */ ++-#define CKM_PBE_SHA1_CAST128_CBC 0x000003A5UL ++-#define CKM_PBE_SHA1_RC4_128 0x000003A6UL ++-#define CKM_PBE_SHA1_RC4_40 0x000003A7UL ++-#define CKM_PBE_SHA1_DES3_EDE_CBC 0x000003A8UL ++-#define CKM_PBE_SHA1_DES2_EDE_CBC 0x000003A9UL ++-#define CKM_PBE_SHA1_RC2_128_CBC 0x000003AAUL ++-#define CKM_PBE_SHA1_RC2_40_CBC 0x000003ABUL ++- ++-#define CKM_PKCS5_PBKD2 0x000003B0UL ++- ++-#define CKM_PBA_SHA1_WITH_SHA1_HMAC 0x000003C0UL ++- ++-#define CKM_WTLS_PRE_MASTER_KEY_GEN 0x000003D0UL ++-#define CKM_WTLS_MASTER_KEY_DERIVE 0x000003D1UL ++-#define CKM_WTLS_MASTER_KEY_DERIVE_DH_ECC 0x000003D2UL ++-#define CKM_WTLS_PRF 0x000003D3UL ++-#define CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE 0x000003D4UL ++-#define CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE 0x000003D5UL ++- ++-#define CKM_TLS12_MAC 0x000003D8UL ++-#define CKM_TLS12_KDF 0x000003D9UL ++-#define CKM_TLS12_MASTER_KEY_DERIVE 0x000003E0UL ++-#define CKM_TLS12_KEY_AND_MAC_DERIVE 0x000003E1UL ++-#define CKM_TLS12_MASTER_KEY_DERIVE_DH 0x000003E2UL ++-#define CKM_TLS12_KEY_SAFE_DERIVE 0x000003E3UL ++-#define CKM_TLS_MAC 0x000003E4UL ++-#define CKM_TLS_KDF 0x000003E5UL ++- ++-#define CKM_KEY_WRAP_LYNKS 0x00000400UL ++-#define CKM_KEY_WRAP_SET_OAEP 0x00000401UL ++- ++-#define CKM_CMS_SIG 0x00000500UL ++-#define CKM_KIP_DERIVE 0x00000510UL ++-#define CKM_KIP_WRAP 0x00000511UL ++-#define CKM_KIP_MAC 0x00000512UL ++- ++-#define CKM_CAMELLIA_KEY_GEN 0x00000550UL ++-#define CKM_CAMELLIA_ECB 0x00000551UL ++-#define CKM_CAMELLIA_CBC 0x00000552UL ++-#define CKM_CAMELLIA_MAC 0x00000553UL ++-#define CKM_CAMELLIA_MAC_GENERAL 0x00000554UL ++-#define CKM_CAMELLIA_CBC_PAD 0x00000555UL ++-#define CKM_CAMELLIA_ECB_ENCRYPT_DATA 0x00000556UL ++-#define CKM_CAMELLIA_CBC_ENCRYPT_DATA 0x00000557UL ++-#define CKM_CAMELLIA_CTR 0x00000558UL ++- ++-#define CKM_ARIA_KEY_GEN 0x00000560UL ++-#define CKM_ARIA_ECB 0x00000561UL ++-#define CKM_ARIA_CBC 0x00000562UL ++-#define CKM_ARIA_MAC 0x00000563UL ++-#define CKM_ARIA_MAC_GENERAL 0x00000564UL ++-#define CKM_ARIA_CBC_PAD 0x00000565UL ++-#define CKM_ARIA_ECB_ENCRYPT_DATA 0x00000566UL ++-#define CKM_ARIA_CBC_ENCRYPT_DATA 0x00000567UL ++- ++-#define CKM_SEED_KEY_GEN 0x00000650UL ++-#define CKM_SEED_ECB 0x00000651UL ++-#define CKM_SEED_CBC 0x00000652UL ++-#define CKM_SEED_MAC 0x00000653UL ++-#define CKM_SEED_MAC_GENERAL 0x00000654UL ++-#define CKM_SEED_CBC_PAD 0x00000655UL ++-#define CKM_SEED_ECB_ENCRYPT_DATA 0x00000656UL ++-#define CKM_SEED_CBC_ENCRYPT_DATA 0x00000657UL ++- ++-#define CKM_SKIPJACK_KEY_GEN 0x00001000UL ++-#define CKM_SKIPJACK_ECB64 0x00001001UL ++-#define CKM_SKIPJACK_CBC64 0x00001002UL ++-#define CKM_SKIPJACK_OFB64 0x00001003UL ++-#define CKM_SKIPJACK_CFB64 0x00001004UL ++-#define CKM_SKIPJACK_CFB32 0x00001005UL ++-#define CKM_SKIPJACK_CFB16 0x00001006UL ++-#define CKM_SKIPJACK_CFB8 0x00001007UL ++-#define CKM_SKIPJACK_WRAP 0x00001008UL ++-#define CKM_SKIPJACK_PRIVATE_WRAP 0x00001009UL ++-#define CKM_SKIPJACK_RELAYX 0x0000100aUL ++-#define CKM_KEA_KEY_PAIR_GEN 0x00001010UL ++-#define CKM_KEA_KEY_DERIVE 0x00001011UL ++-#define CKM_KEA_DERIVE 0x00001012UL ++-#define CKM_FORTEZZA_TIMESTAMP 0x00001020UL ++-#define CKM_BATON_KEY_GEN 0x00001030UL ++-#define CKM_BATON_ECB128 0x00001031UL ++-#define CKM_BATON_ECB96 0x00001032UL ++-#define CKM_BATON_CBC128 0x00001033UL ++-#define CKM_BATON_COUNTER 0x00001034UL ++-#define CKM_BATON_SHUFFLE 0x00001035UL ++-#define CKM_BATON_WRAP 0x00001036UL ++- ++-#define CKM_ECDSA_KEY_PAIR_GEN 0x00001040UL /* Deprecated */ ++-#define CKM_EC_KEY_PAIR_GEN 0x00001040UL ++- 
++-#define CKM_ECDSA 0x00001041UL ++-#define CKM_ECDSA_SHA1 0x00001042UL ++-#define CKM_ECDSA_SHA224 0x00001043UL ++-#define CKM_ECDSA_SHA256 0x00001044UL ++-#define CKM_ECDSA_SHA384 0x00001045UL ++-#define CKM_ECDSA_SHA512 0x00001046UL +++#define CKM_CAST_KEY_GEN 0x00000300UL +++#define CKM_CAST_ECB 0x00000301UL +++#define CKM_CAST_CBC 0x00000302UL +++#define CKM_CAST_MAC 0x00000303UL +++#define CKM_CAST_MAC_GENERAL 0x00000304UL +++#define CKM_CAST_CBC_PAD 0x00000305UL +++#define CKM_CAST3_KEY_GEN 0x00000310UL +++#define CKM_CAST3_ECB 0x00000311UL +++#define CKM_CAST3_CBC 0x00000312UL +++#define CKM_CAST3_MAC 0x00000313UL +++#define CKM_CAST3_MAC_GENERAL 0x00000314UL +++#define CKM_CAST3_CBC_PAD 0x00000315UL +++#define CKM_CAST5_KEY_GEN 0x00000320UL +++#define CKM_CAST128_KEY_GEN 0x00000320UL +++#define CKM_CAST5_ECB 0x00000321UL +++#define CKM_CAST128_ECB 0x00000321UL +++#define CKM_CAST5_CBC 0x00000322UL +++#define CKM_CAST128_CBC 0x00000322UL +++#define CKM_CAST5_MAC 0x00000323UL +++#define CKM_CAST128_MAC 0x00000323UL +++#define CKM_CAST5_MAC_GENERAL 0x00000324UL +++#define CKM_CAST128_MAC_GENERAL 0x00000324UL +++#define CKM_CAST5_CBC_PAD 0x00000325UL +++#define CKM_CAST128_CBC_PAD 0x00000325UL +++#define CKM_RC5_KEY_GEN 0x00000330UL +++#define CKM_RC5_ECB 0x00000331UL +++#define CKM_RC5_CBC 0x00000332UL +++#define CKM_RC5_MAC 0x00000333UL +++#define CKM_RC5_MAC_GENERAL 0x00000334UL +++#define CKM_RC5_CBC_PAD 0x00000335UL +++#define CKM_IDEA_KEY_GEN 0x00000340UL +++#define CKM_IDEA_ECB 0x00000341UL +++#define CKM_IDEA_CBC 0x00000342UL +++#define CKM_IDEA_MAC 0x00000343UL +++#define CKM_IDEA_MAC_GENERAL 0x00000344UL +++#define CKM_IDEA_CBC_PAD 0x00000345UL +++#define CKM_GENERIC_SECRET_KEY_GEN 0x00000350UL +++#define CKM_CONCATENATE_BASE_AND_KEY 0x00000360UL +++#define CKM_CONCATENATE_BASE_AND_DATA 0x00000362UL +++#define CKM_CONCATENATE_DATA_AND_BASE 0x00000363UL +++#define CKM_XOR_BASE_AND_DATA 0x00000364UL +++#define CKM_EXTRACT_KEY_FROM_KEY 0x00000365UL +++#define CKM_SSL3_PRE_MASTER_KEY_GEN 0x00000370UL +++#define CKM_SSL3_MASTER_KEY_DERIVE 0x00000371UL +++#define CKM_SSL3_KEY_AND_MAC_DERIVE 0x00000372UL +++ +++/* CKM_SSL3_MASTER_KEY_DERIVE_DH, CKM_TLS_PRE_MASTER_KEY_GEN, +++ * CKM_TLS_MASTER_KEY_DERIVE, CKM_TLS_KEY_AND_MAC_DERIVE, and +++ * CKM_TLS_MASTER_KEY_DERIVE_DH are new for v2.11 */ +++#define CKM_SSL3_MASTER_KEY_DERIVE_DH 0x00000373UL +++#define CKM_TLS_PRE_MASTER_KEY_GEN 0x00000374UL +++#define CKM_TLS_MASTER_KEY_DERIVE 0x00000375UL +++#define CKM_TLS_KEY_AND_MAC_DERIVE 0x00000376UL +++#define CKM_TLS_MASTER_KEY_DERIVE_DH 0x00000377UL +++ +++/* CKM_TLS_PRF is new for v2.20 */ +++#define CKM_TLS_PRF 0x00000378UL +++ +++#define CKM_SSL3_MD5_MAC 0x00000380UL +++#define CKM_SSL3_SHA1_MAC 0x00000381UL +++#define CKM_MD5_KEY_DERIVATION 0x00000390UL +++#define CKM_MD2_KEY_DERIVATION 0x00000391UL +++#define CKM_SHA1_KEY_DERIVATION 0x00000392UL +++ +++/* CKM_SHA256/384/512 are new for v2.20 */ +++#define CKM_SHA256_KEY_DERIVATION 0x00000393UL +++#define CKM_SHA384_KEY_DERIVATION 0x00000394UL +++#define CKM_SHA512_KEY_DERIVATION 0x00000395UL +++ +++/* CKM_SHA224 new for v2.20 amendment 3 */ +++#define CKM_SHA224_KEY_DERIVATION 0x00000396UL +++ +++/* new for v2.40 */ +++#define CKM_SHA3_256_KEY_DERIVATION 0x00000397UL +++#define CKM_SHA3_224_KEY_DERIVATION 0x00000398UL +++#define CKM_SHA3_384_KEY_DERIVATION 0x00000399UL +++#define CKM_SHA3_512_KEY_DERIVATION 0x0000039AUL +++#define CKM_SHAKE_128_KEY_DERIVATION 0x0000039BUL +++#define CKM_SHAKE_256_KEY_DERIVATION 0x0000039CUL +++ 
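The CKM_* values in this block are only mechanism identifiers; whether a particular token actually implements one has to be probed at run time with C_GetMechanismInfo, whose CK_MECHANISM_INFO result and CKF_* capability flags appear later in this header. A hedged sketch, assuming `p11` and `slotID` come from C_GetFunctionList and C_GetSlotList; the helper name is illustrative:

    /* Sketch: does this slot offer AES-CBC with PKCS padding for
     * both encryption and decryption? */
    static int supports_aes_cbc_pad(CK_FUNCTION_LIST_PTR p11, CK_SLOT_ID slotID)
    {
        CK_MECHANISM_INFO info;
        CK_RV rv = p11->C_GetMechanismInfo(slotID, CKM_AES_CBC_PAD, &info);
        return rv == CKR_OK &&
               (info.flags & CKF_ENCRYPT) != 0 &&
               (info.flags & CKF_DECRYPT) != 0;
    }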
+++#define CKM_PBE_MD2_DES_CBC 0x000003A0UL +++#define CKM_PBE_MD5_DES_CBC 0x000003A1UL +++#define CKM_PBE_MD5_CAST_CBC 0x000003A2UL +++#define CKM_PBE_MD5_CAST3_CBC 0x000003A3UL +++#define CKM_PBE_MD5_CAST5_CBC 0x000003A4UL +++#define CKM_PBE_MD5_CAST128_CBC 0x000003A4UL +++#define CKM_PBE_SHA1_CAST5_CBC 0x000003A5UL +++#define CKM_PBE_SHA1_CAST128_CBC 0x000003A5UL +++#define CKM_PBE_SHA1_RC4_128 0x000003A6UL +++#define CKM_PBE_SHA1_RC4_40 0x000003A7UL +++#define CKM_PBE_SHA1_DES3_EDE_CBC 0x000003A8UL +++#define CKM_PBE_SHA1_DES2_EDE_CBC 0x000003A9UL +++#define CKM_PBE_SHA1_RC2_128_CBC 0x000003AAUL +++#define CKM_PBE_SHA1_RC2_40_CBC 0x000003ABUL +++ +++/* CKM_PKCS5_PBKD2 is new for v2.10 */ +++#define CKM_PKCS5_PBKD2 0x000003B0UL +++ +++#define CKM_PBA_SHA1_WITH_SHA1_HMAC 0x000003C0UL +++ +++/* WTLS mechanisms are new for v2.20 */ +++#define CKM_WTLS_PRE_MASTER_KEY_GEN 0x000003D0UL +++#define CKM_WTLS_MASTER_KEY_DERIVE 0x000003D1UL +++#define CKM_WTLS_MASTER_KEY_DERIVE_DH_ECC 0x000003D2UL +++#define CKM_WTLS_PRF 0x000003D3UL +++#define CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE 0x000003D4UL +++#define CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE 0x000003D5UL +++ +++/* TLS 1.2 mechanisms are new for v2.40 */ +++#define CKM_TLS12_MASTER_KEY_DERIVE 0x000003E0UL +++#define CKM_TLS12_KEY_AND_MAC_DERIVE 0x000003E1UL +++#define CKM_TLS12_MASTER_KEY_DERIVE_DH 0x000003E2UL +++#define CKM_TLS12_KEY_SAFE_DERIVE 0x000003E3UL +++#define CKM_TLS12_MAC 0x000003D8UL +++#define CKM_TLS12_KDF 0x000003D9UL +++#define CKM_TLS_MAC 0x000003E4UL +++#define CKM_TLS_KDF 0x000003E5UL +++ +++#define CKM_KEY_WRAP_LYNKS 0x00000400UL +++#define CKM_KEY_WRAP_SET_OAEP 0x00000401UL +++ +++/* CKM_CMS_SIG is new for v2.20 */ +++#define CKM_CMS_SIG 0x00000500UL +++ +++/* new for 2.40 */ +++#define CKM_KIP_DERIVE 0x00000510UL +++#define CKM_KIP_WRAP 0x00000511UL +++#define CKM_KIP_MAC 0x00000512UL +++ +++/* Fortezza mechanisms */ +++#define CKM_SKIPJACK_KEY_GEN 0x00001000UL +++#define CKM_SKIPJACK_ECB64 0x00001001UL +++#define CKM_SKIPJACK_CBC64 0x00001002UL +++#define CKM_SKIPJACK_OFB64 0x00001003UL +++#define CKM_SKIPJACK_CFB64 0x00001004UL +++#define CKM_SKIPJACK_CFB32 0x00001005UL +++#define CKM_SKIPJACK_CFB16 0x00001006UL +++#define CKM_SKIPJACK_CFB8 0x00001007UL +++#define CKM_SKIPJACK_WRAP 0x00001008UL +++#define CKM_SKIPJACK_PRIVATE_WRAP 0x00001009UL +++#define CKM_SKIPJACK_RELAYX 0x0000100aUL +++#define CKM_KEA_KEY_PAIR_GEN 0x00001010UL +++#define CKM_KEA_KEY_DERIVE 0x00001011UL +++#define CKM_FORTEZZA_TIMESTAMP 0x00001020UL +++#define CKM_BATON_KEY_GEN 0x00001030UL +++#define CKM_BATON_ECB128 0x00001031UL +++#define CKM_BATON_ECB96 0x00001032UL +++#define CKM_BATON_CBC128 0x00001033UL +++#define CKM_BATON_COUNTER 0x00001034UL +++#define CKM_BATON_SHUFFLE 0x00001035UL +++#define CKM_BATON_WRAP 0x00001036UL +++ +++/* CKM_ECDSA_KEY_PAIR_GEN is deprecated in v2.11, +++ * CKM_EC_KEY_PAIR_GEN is preferred */ +++#define CKM_ECDSA_KEY_PAIR_GEN 0x00001040UL +++#define CKM_EC_KEY_PAIR_GEN 0x00001040UL +++ +++#define CKM_ECDSA 0x00001041UL +++#define CKM_ECDSA_SHA1 0x00001042UL +++ +++/* new for v2.40 */ +++#define CKM_ECDSA_SHA224 0x00001043UL +++#define CKM_ECDSA_SHA256 0x00001044UL +++#define CKM_ECDSA_SHA384 0x00001045UL +++#define CKM_ECDSA_SHA512 0x00001046UL ++ #define CKM_EC_KEY_PAIR_GEN_W_EXTRA_BITS 0x0000140BUL ++ ++-#define CKM_ECDH1_DERIVE 0x00001050UL ++-#define CKM_ECDH1_COFACTOR_DERIVE 0x00001051UL ++-#define CKM_ECMQV_DERIVE 0x00001052UL ++- ++-#define CKM_ECDH_AES_KEY_WRAP 0x00001053UL ++-#define CKM_RSA_AES_KEY_WRAP 
0x00001054UL ++- ++-#define CKM_JUNIPER_KEY_GEN 0x00001060UL ++-#define CKM_JUNIPER_ECB128 0x00001061UL ++-#define CKM_JUNIPER_CBC128 0x00001062UL ++-#define CKM_JUNIPER_COUNTER 0x00001063UL ++-#define CKM_JUNIPER_SHUFFLE 0x00001064UL ++-#define CKM_JUNIPER_WRAP 0x00001065UL ++-#define CKM_FASTHASH 0x00001070UL ++- ++-#define CKM_AES_XTS 0x00001071UL ++-#define CKM_AES_XTS_KEY_GEN 0x00001072UL ++-#define CKM_AES_KEY_GEN 0x00001080UL ++-#define CKM_AES_ECB 0x00001081UL ++-#define CKM_AES_CBC 0x00001082UL ++-#define CKM_AES_MAC 0x00001083UL ++-#define CKM_AES_MAC_GENERAL 0x00001084UL ++-#define CKM_AES_CBC_PAD 0x00001085UL ++-#define CKM_AES_CTR 0x00001086UL ++-#define CKM_AES_GCM 0x00001087UL ++-#define CKM_AES_CCM 0x00001088UL ++-#define CKM_AES_CTS 0x00001089UL ++-#define CKM_AES_CMAC 0x0000108AUL ++-#define CKM_AES_CMAC_GENERAL 0x0000108BUL ++- ++-#define CKM_AES_XCBC_MAC 0x0000108CUL ++-#define CKM_AES_XCBC_MAC_96 0x0000108DUL ++-#define CKM_AES_GMAC 0x0000108EUL ++- ++-#define CKM_BLOWFISH_KEY_GEN 0x00001090UL ++-#define CKM_BLOWFISH_CBC 0x00001091UL ++-#define CKM_TWOFISH_KEY_GEN 0x00001092UL ++-#define CKM_TWOFISH_CBC 0x00001093UL ++-#define CKM_BLOWFISH_CBC_PAD 0x00001094UL ++-#define CKM_TWOFISH_CBC_PAD 0x00001095UL ++- ++-#define CKM_DES_ECB_ENCRYPT_DATA 0x00001100UL ++-#define CKM_DES_CBC_ENCRYPT_DATA 0x00001101UL ++-#define CKM_DES3_ECB_ENCRYPT_DATA 0x00001102UL ++-#define CKM_DES3_CBC_ENCRYPT_DATA 0x00001103UL ++-#define CKM_AES_ECB_ENCRYPT_DATA 0x00001104UL ++-#define CKM_AES_CBC_ENCRYPT_DATA 0x00001105UL ++- ++-#define CKM_GOSTR3410_KEY_PAIR_GEN 0x00001200UL ++-#define CKM_GOSTR3410 0x00001201UL ++-#define CKM_GOSTR3410_WITH_GOSTR3411 0x00001202UL ++-#define CKM_GOSTR3410_KEY_WRAP 0x00001203UL ++-#define CKM_GOSTR3410_DERIVE 0x00001204UL ++-#define CKM_GOSTR3411 0x00001210UL ++-#define CKM_GOSTR3411_HMAC 0x00001211UL ++-#define CKM_GOST28147_KEY_GEN 0x00001220UL ++-#define CKM_GOST28147_ECB 0x00001221UL ++-#define CKM_GOST28147 0x00001222UL ++-#define CKM_GOST28147_MAC 0x00001223UL ++-#define CKM_GOST28147_KEY_WRAP 0x00001224UL ++-#define CKM_CHACHA20_KEY_GEN 0x00001225UL ++-#define CKM_CHACHA20 0x00001226UL ++-#define CKM_POLY1305_KEY_GEN 0x00001227UL ++-#define CKM_POLY1305 0x00001228UL ++-#define CKM_DSA_PARAMETER_GEN 0x00002000UL ++-#define CKM_DH_PKCS_PARAMETER_GEN 0x00002001UL ++-#define CKM_X9_42_DH_PARAMETER_GEN 0x00002002UL ++-#define CKM_DSA_PROBABILISTIC_PARAMETER_GEN 0x00002003UL ++-#define CKM_DSA_PROBABLISTIC_PARAMETER_GEN CKM_DSA_PROBABILISTIC_PARAMETER_GEN ++-#define CKM_DSA_SHAWE_TAYLOR_PARAMETER_GEN 0x00002004UL ++-#define CKM_DSA_FIPS_G_GEN 0x00002005UL ++- ++-#define CKM_AES_OFB 0x00002104UL ++-#define CKM_AES_CFB64 0x00002105UL ++-#define CKM_AES_CFB8 0x00002106UL ++-#define CKM_AES_CFB128 0x00002107UL ++- ++-#define CKM_AES_CFB1 0x00002108UL ++-#define CKM_AES_KEY_WRAP 0x00002109UL /* WAS: 0x00001090 */ ++-#define CKM_AES_KEY_WRAP_PAD 0x0000210AUL /* WAS: 0x00001091 */ ++-#define CKM_AES_KEY_WRAP_KWP 0x0000210BUL ++- ++-#define CKM_RSA_PKCS_TPM_1_1 0x00004001UL ++-#define CKM_RSA_PKCS_OAEP_TPM_1_1 0x00004002UL ++- ++-#define CKM_SHA_1_KEY_GEN 0x00004003UL ++-#define CKM_SHA224_KEY_GEN 0x00004004UL ++-#define CKM_SHA256_KEY_GEN 0x00004005UL ++-#define CKM_SHA384_KEY_GEN 0x00004006UL ++-#define CKM_SHA512_KEY_GEN 0x00004007UL ++-#define CKM_SHA512_224_KEY_GEN 0x00004008UL ++-#define CKM_SHA512_256_KEY_GEN 0x00004009UL ++-#define CKM_SHA512_T_KEY_GEN 0x0000400aUL ++-#define CKM_NULL 0x0000400bUL ++-#define CKM_BLAKE2B_160 0x0000400cUL ++-#define 
CKM_BLAKE2B_160_HMAC 0x0000400dUL ++-#define CKM_BLAKE2B_160_HMAC_GENERAL 0x0000400eUL ++-#define CKM_BLAKE2B_160_KEY_DERIVE 0x0000400fUL ++-#define CKM_BLAKE2B_160_KEY_GEN 0x00004010UL ++-#define CKM_BLAKE2B_256 0x00004011UL ++-#define CKM_BLAKE2B_256_HMAC 0x00004012UL ++-#define CKM_BLAKE2B_256_HMAC_GENERAL 0x00004013UL ++-#define CKM_BLAKE2B_256_KEY_DERIVE 0x00004014UL ++-#define CKM_BLAKE2B_256_KEY_GEN 0x00004015UL ++-#define CKM_BLAKE2B_384 0x00004016UL ++-#define CKM_BLAKE2B_384_HMAC 0x00004017UL ++-#define CKM_BLAKE2B_384_HMAC_GENERAL 0x00004018UL ++-#define CKM_BLAKE2B_384_KEY_DERIVE 0x00004019UL ++-#define CKM_BLAKE2B_384_KEY_GEN 0x0000401aUL ++-#define CKM_BLAKE2B_512 0x0000401bUL ++-#define CKM_BLAKE2B_512_HMAC 0x0000401cUL ++-#define CKM_BLAKE2B_512_HMAC_GENERAL 0x0000401dUL ++-#define CKM_BLAKE2B_512_KEY_DERIVE 0x0000401eUL ++-#define CKM_BLAKE2B_512_KEY_GEN 0x0000401fUL ++-#define CKM_SALSA20 0x00004020UL ++-#define CKM_CHACHA20_POLY1305 0x00004021UL ++-#define CKM_SALSA20_POLY1305 0x00004022UL ++-#define CKM_X3DH_INITIALIZE 0x00004023UL ++-#define CKM_X3DH_RESPOND 0x00004024UL ++-#define CKM_X2RATCHET_INITIALIZE 0x00004025UL ++-#define CKM_X2RATCHET_RESPOND 0x00004026UL ++-#define CKM_X2RATCHET_ENCRYPT 0x00004027UL ++-#define CKM_X2RATCHET_DECRYPT 0x00004028UL ++-#define CKM_XEDDSA 0x00004029UL ++-#define CKM_HKDF_DERIVE 0x0000402aUL ++-#define CKM_HKDF_DATA 0x0000402bUL ++-#define CKM_HKDF_KEY_GEN 0x0000402cUL ++-#define CKM_SALSA20_KEY_GEN 0x0000402dUL ++- ++-#define CKM_ECDSA_SHA3_224 0x00001047UL ++-#define CKM_ECDSA_SHA3_256 0x00001048UL ++-#define CKM_ECDSA_SHA3_384 0x00001049UL ++-#define CKM_ECDSA_SHA3_512 0x0000104aUL ++-#define CKM_EC_EDWARDS_KEY_PAIR_GEN 0x00001055UL +++/* CKM_ECDH1_DERIVE, CKM_ECDH1_COFACTOR_DERIVE, and CKM_ECMQV_DERIVE +++ * are new for v2.11 */ +++#define CKM_ECDH1_DERIVE 0x00001050UL +++#define CKM_ECDH1_COFACTOR_DERIVE 0x00001051UL +++#define CKM_ECMQV_DERIVE 0x00001052UL +++ +++/* new for v2.40 */ +++#define CKM_ECDH_AES_KEY_WRAP 0x00001053UL +++#define CKM_RSA_AES_KEY_WRAP 0x00001054UL +++ +++#define CKM_JUNIPER_KEY_GEN 0x00001060UL +++#define CKM_JUNIPER_ECB128 0x00001061UL +++#define CKM_JUNIPER_CBC128 0x00001062UL +++#define CKM_JUNIPER_COUNTER 0x00001063UL +++#define CKM_JUNIPER_SHUFFLE 0x00001064UL +++#define CKM_JUNIPER_WRAP 0x00001065UL +++#define CKM_FASTHASH 0x00001070UL +++ +++/* CKM_AES_KEY_GEN, CKM_AES_ECB, CKM_AES_CBC, CKM_AES_MAC, +++ * CKM_AES_MAC_GENERAL, CKM_AES_CBC_PAD, CKM_DSA_PARAMETER_GEN, +++ * CKM_DH_PKCS_PARAMETER_GEN, and CKM_X9_42_DH_PARAMETER_GEN are +++ * new for v2.11 */ +++#define CKM_AES_KEY_GEN 0x00001080UL +++#define CKM_AES_ECB 0x00001081UL +++#define CKM_AES_CBC 0x00001082UL +++#define CKM_AES_MAC 0x00001083UL +++#define CKM_AES_MAC_GENERAL 0x00001084UL +++#define CKM_AES_CBC_PAD 0x00001085UL +++/* new for v2.20 amendment 3 */ +++#define CKM_AES_CTR 0x00001086UL +++/* new for v2.30 */ +++#define CKM_AES_GCM 0x00001087UL +++#define CKM_AES_CCM 0x00001088UL +++#define CKM_AES_CTS 0x00001089UL +++/* AES-CMAC values copied from v2.40 errata 1 header file */ +++#define CKM_AES_CMAC 0x0000108AUL +++#define CKM_AES_CMAC_GENERAL 0x0000108BUL +++#define CKM_AES_XCBC_MAC 0x0000108CUL +++#define CKM_AES_XCBC_MAC_96 0x0000108DUL +++ +++/* BlowFish and TwoFish are new for v2.20 */ +++#define CKM_BLOWFISH_KEY_GEN 0x00001090UL +++#define CKM_BLOWFISH_CBC 0x00001091UL +++#define CKM_TWOFISH_KEY_GEN 0x00001092UL +++#define CKM_TWOFISH_CBC 0x00001093UL +++ +++/* new for v2.40 */ +++#define CKM_BLOWFISH_CBC_PAD 0x00001094UL 
+++#define CKM_TWOFISH_CBC_PAD 0x00001095UL +++ +++/* Camellia is proposed for v2.20 Amendment 3 */ +++#define CKM_CAMELLIA_KEY_GEN 0x00000550UL +++#define CKM_CAMELLIA_ECB 0x00000551UL +++#define CKM_CAMELLIA_CBC 0x00000552UL +++#define CKM_CAMELLIA_MAC 0x00000553UL +++#define CKM_CAMELLIA_MAC_GENERAL 0x00000554UL +++#define CKM_CAMELLIA_CBC_PAD 0x00000555UL +++#define CKM_CAMELLIA_ECB_ENCRYPT_DATA 0x00000556UL +++#define CKM_CAMELLIA_CBC_ENCRYPT_DATA 0x00000557UL +++ +++/* new for v2.40 */ +++#define CKM_ARIA_KEY_GEN 0x00000560UL +++#define CKM_ARIA_ECB 0x00000561UL +++#define CKM_ARIA_CBC 0x00000562UL +++#define CKM_ARIA_MAC 0x00000563UL +++#define CKM_ARIA_MAC_GENERAL 0x00000564UL +++#define CKM_ARIA_CBC_PAD 0x00000565UL +++#define CKM_ARIA_ECB_ENCRYPT_DATA 0x00000566UL +++#define CKM_ARIA_CBC_ENCRYPT_DATA 0x00000567UL +++ +++#define CKM_SEED_KEY_GEN 0x00000650UL +++#define CKM_SEED_ECB 0x00000651UL +++#define CKM_SEED_CBC 0x00000652UL +++#define CKM_SEED_MAC 0x00000653UL +++#define CKM_SEED_MAC_GENERAL 0x00000654UL +++#define CKM_SEED_CBC_PAD 0x00000655UL +++#define CKM_SEED_ECB_ENCRYPT_DATA 0x00000656UL +++#define CKM_SEED_CBC_ENCRYPT_DATA 0x00000657UL +++ +++/* new for v2.40 */ +++#define CKM_ECDSA_SHA3_224 0x00001047UL +++#define CKM_ECDSA_SHA3_256 0x00001048UL +++#define CKM_ECDSA_SHA3_384 0x00001049UL +++#define CKM_ECDSA_SHA3_512 0x0000104aUL +++#define CKM_EC_EDWARDS_KEY_PAIR_GEN 0x00001055UL ++ #define CKM_EC_MONTGOMERY_KEY_PAIR_GEN 0x00001056UL ++-#define CKM_EDDSA 0x00001057UL ++-#define CKM_SP800_108_COUNTER_KDF 0x000003acUL ++-#define CKM_SP800_108_FEEDBACK_KDF 0x000003adUL +++#define CKM_EDDSA 0x00001057UL +++ +++/* CKM_xxx_ENCRYPT_DATA mechanisms are new for v2.20 */ +++#define CKM_DES_ECB_ENCRYPT_DATA 0x00001100UL +++#define CKM_DES_CBC_ENCRYPT_DATA 0x00001101UL +++#define CKM_DES3_ECB_ENCRYPT_DATA 0x00001102UL +++#define CKM_DES3_CBC_ENCRYPT_DATA 0x00001103UL +++#define CKM_AES_ECB_ENCRYPT_DATA 0x00001104UL +++#define CKM_AES_CBC_ENCRYPT_DATA 0x00001105UL +++ +++#define CKM_GOSTR3410_KEY_PAIR_GEN 0x00001200UL +++#define CKM_GOSTR3410 0x00001201UL +++#define CKM_GOSTR3410_WITH_GOSTR3411 0x00001202UL +++#define CKM_GOSTR3410_KEY_WRAP 0x00001203UL +++#define CKM_GOSTR3410_DERIVE 0x00001204UL +++#define CKM_GOSTR3411 0x00001210UL +++#define CKM_GOSTR3411_HMAC 0x00001211UL +++#define CKM_GOST28147_KEY_GEN 0x00001220UL +++#define CKM_GOST28147_ECB 0x00001221UL +++#define CKM_GOST28147 0x00001222UL +++#define CKM_GOST28147_MAC 0x00001223UL +++#define CKM_GOST28147_KEY_WRAP 0x00001224UL +++ +++/* new for v2.40 */ +++#define CKM_CHACHA20_KEY_GEN 0x00001225UL +++#define CKM_CHACHA20 0x00001226UL +++#define CKM_POLY1305_KEY_GEN 0x00001227UL +++#define CKM_POLY1305 0x00001228UL +++ +++#define CKM_DSA_PARAMETER_GEN 0x00002000UL +++#define CKM_DH_PKCS_PARAMETER_GEN 0x00002001UL +++#define CKM_X9_42_DH_PARAMETER_GEN 0x00002002UL +++ +++/* new for v2.40 */ +++#define CKM_DSA_PROBABILISTIC_PARAMETER_GEN 0x00002003UL +++#define CKM_DSA_SHAWE_TAYLOR_PARAMETER_GEN 0x00002004UL +++#define CKM_DSA_FIPS_G_GEN 0x00002005UL +++#define CKM_AES_CFB1 0x00002108UL +++#define CKM_AES_KEY_WRAP 0x00002109UL +++#define CKM_AES_KEY_WRAP_PAD 0x0000210AUL +++#define CKM_AES_KEY_WRAP_KWP 0x0000210BUL +++ +++/* CKM_SP800_108_xxx_KDF are new for v3.0 */ +++#define CKM_SP800_108_COUNTER_KDF 0x000003acUL +++#define CKM_SP800_108_FEEDBACK_KDF 0x000003adUL ++ #define CKM_SP800_108_DOUBLE_PIPELINE_KDF 0x000003aeUL ++ ++-#define CKM_VENDOR_DEFINED 0x80000000UL +++/* new for v2.4 */ +++#define CKM_RSA_PKCS_TPM_1_1 
0x00004001UL +++#define CKM_RSA_PKCS_OAEP_TPM_1_1 0x00004002UL +++#define CKM_SHA_1_KEY_GEN 0x00004003UL +++#define CKM_SHA224_KEY_GEN 0x00004004UL +++#define CKM_SHA256_KEY_GEN 0x00004005UL +++#define CKM_SHA384_KEY_GEN 0x00004006UL +++#define CKM_SHA512_KEY_GEN 0x00004007UL +++#define CKM_SHA512_224_KEY_GEN 0x00004008UL +++#define CKM_SHA512_256_KEY_GEN 0x00004009UL +++#define CKM_SHA512_T_KEY_GEN 0x0000400aUL +++ +++/* new for v3.0 */ +++#define CKM_NULL 0x0000400bUL +++#define CKM_BLAKE2B_160 0x0000400cUL +++#define CKM_BLAKE2B_160_HMAC 0x0000400dUL +++#define CKM_BLAKE2B_160_HMAC_GENERAL 0x0000400eUL +++#define CKM_BLAKE2B_160_KEY_DERIVE 0x0000400fUL +++#define CKM_BLAKE2B_160_KEY_GEN 0x00004010UL +++#define CKM_BLAKE2B_256 0x00004011UL +++#define CKM_BLAKE2B_256_HMAC 0x00004012UL +++#define CKM_BLAKE2B_256_HMAC_GENERAL 0x00004013UL +++#define CKM_BLAKE2B_256_KEY_DERIVE 0x00004014UL +++#define CKM_BLAKE2B_256_KEY_GEN 0x00004015UL +++#define CKM_BLAKE2B_384 0x00004016UL +++#define CKM_BLAKE2B_384_HMAC 0x00004017UL +++#define CKM_BLAKE2B_384_HMAC_GENERAL 0x00004018UL +++#define CKM_BLAKE2B_384_KEY_DERIVE 0x00004019UL +++#define CKM_BLAKE2B_384_KEY_GEN 0x0000401aUL +++#define CKM_BLAKE2B_512 0x0000401bUL +++#define CKM_BLAKE2B_512_HMAC 0x0000401cUL +++#define CKM_BLAKE2B_512_HMAC_GENERAL 0x0000401dUL +++#define CKM_BLAKE2B_512_KEY_DERIVE 0x0000401eUL +++#define CKM_BLAKE2B_512_KEY_GEN 0x0000401fUL +++#define CKM_SALSA20 0x00004020UL +++#define CKM_CHACHA20_POLY1305 0x00004021UL +++#define CKM_SALSA20_POLY1305 0x00004022UL +++#define CKM_X3DH_INITIALIZE 0x00004023UL +++#define CKM_X3DH_RESPOND 0x00004024UL +++#define CKM_X2RATCHET_INITIALIZE 0x00004025UL +++#define CKM_X2RATCHET_RESPOND 0x00004026UL +++#define CKM_X2RATCHET_ENCRYPT 0x00004027UL +++#define CKM_X2RATCHET_DECRYPT 0x00004028UL +++#define CKM_XEDDSA 0x00004029UL +++#define CKM_HKDF_DERIVE 0x0000402aUL +++#define CKM_HKDF_DATA 0x0000402bUL +++#define CKM_HKDF_KEY_GEN 0x0000402cUL +++#define CKM_SALSA20_KEY_GEN 0x0000402dUL ++ ++-typedef CK_MECHANISM_TYPE CK_PTR CK_MECHANISM_TYPE_PTR; +++#define CKM_VENDOR_DEFINED 0x80000000UL ++ +++typedef CK_MECHANISM_TYPE CK_PTR CK_MECHANISM_TYPE_PTR; ++ ++ /* CK_MECHANISM is a structure that specifies a particular ++- * mechanism ++- */ +++ * mechanism */ ++ typedef struct CK_MECHANISM { ++ CK_MECHANISM_TYPE mechanism; ++- CK_VOID_PTR pParameter; ++- CK_ULONG ulParameterLen; /* in bytes */ +++ CK_VOID_PTR pParameter; +++ +++ /* ulParameterLen was changed from CK_USHORT to CK_ULONG for +++ * v2.0 */ +++ CK_ULONG ulParameterLen; /* in bytes */ ++ } CK_MECHANISM; ++ ++ typedef CK_MECHANISM CK_PTR CK_MECHANISM_PTR; ++ ++- ++ /* CK_MECHANISM_INFO provides information about a particular ++- * mechanism ++- */ +++ * mechanism */ ++ typedef struct CK_MECHANISM_INFO { ++ CK_ULONG ulMinKeySize; ++ CK_ULONG ulMaxKeySize; ++@@ -1152,711 +1267,947 @@ typedef struct CK_MECHANISM_INFO { ++ } CK_MECHANISM_INFO; ++ ++ /* The flags are defined as follows: ++- * Bit Flag Mask Meaning */ ++-#define CKF_HW 0x00000001UL /* performed by HW */ ++- ++-/* Specify whether or not a mechanism can be used for a particular task */ ++-#define CKF_MESSAGE_ENCRYPT 0x00000002UL ++-#define CKF_MESSAGE_DECRYPT 0x00000004UL ++-#define CKF_MESSAGE_SIGN 0x00000008UL ++-#define CKF_MESSAGE_VERIFY 0x00000010UL ++-#define CKF_MULTI_MESSAGE 0x00000020UL ++-#define CKF_MULTI_MESSGE CKF_MULTI_MESSAGE ++-#define CKF_FIND_OBJECTS 0x00000040UL ++- ++-#define CKF_ENCRYPT 0x00000100UL ++-#define CKF_DECRYPT 0x00000200UL ++-#define CKF_DIGEST 
0x00000400UL
++-#define CKF_SIGN                 0x00000800UL
++-#define CKF_SIGN_RECOVER         0x00001000UL
++-#define CKF_VERIFY               0x00002000UL
++-#define CKF_VERIFY_RECOVER       0x00004000UL
++-#define CKF_GENERATE             0x00008000UL
++-#define CKF_GENERATE_KEY_PAIR    0x00010000UL
++-#define CKF_WRAP                 0x00020000UL
++-#define CKF_UNWRAP               0x00040000UL
++-#define CKF_DERIVE               0x00080000UL
+++ * Bit Flag Mask Meaning */
+++#define CKF_HW                 0x00000001UL /* performed by HW */
++
++-/* Describe a token's EC capabilities not available in mechanism
++- * information.
++- */
++-#define CKF_EC_F_P               0x00100000UL
++-#define CKF_EC_F_2M              0x00200000UL
++-#define CKF_EC_ECPARAMETERS      0x00400000UL
++-#define CKF_EC_OID               0x00800000UL
++-#define CKF_EC_NAMEDCURVE        CKF_EC_OID /* deprecated since PKCS#11 3.00 */
++-#define CKF_EC_UNCOMPRESS        0x01000000UL
++-#define CKF_EC_COMPRESS          0x02000000UL
++-#define CKF_EC_CURVENAME         0x04000000UL
+++/* Message interface Flags, new for v3.0 */
+++#define CKF_MESSAGE_ENCRYPT    0x00000002UL
+++#define CKF_MESSAGE_DECRYPT    0x00000004UL
+++#define CKF_MESSAGE_SIGN       0x00000008UL
+++#define CKF_MESSAGE_VERIFY     0x00000010UL
+++#define CKF_MULTI_MESSAGE      0x00000020UL
+++
+++/* FindObjects (not for CK_MECHANISM_INFO, but for C_CancelSession) v3.0 */
+++#define CKF_FIND_OBJECTS       0x00000040UL
+++
+++/* The flags CKF_ENCRYPT, CKF_DECRYPT, CKF_DIGEST, CKF_SIGN,
+++ * CKF_SIGN_RECOVER, CKF_VERIFY, CKF_VERIFY_RECOVER,
+++ * CKF_GENERATE, CKF_GENERATE_KEY_PAIR, CKF_WRAP, CKF_UNWRAP,
+++ * and CKF_DERIVE are new for v2.0. They specify whether or not
+++ * a mechanism can be used for a particular task */
+++#define CKF_ENCRYPT            0x00000100UL
+++#define CKF_DECRYPT            0x00000200UL
+++#define CKF_DIGEST             0x00000400UL
+++#define CKF_SIGN               0x00000800UL
+++#define CKF_SIGN_RECOVER       0x00001000UL
+++#define CKF_VERIFY             0x00002000UL
+++#define CKF_VERIFY_RECOVER     0x00004000UL
+++#define CKF_GENERATE           0x00008000UL
+++#define CKF_GENERATE_KEY_PAIR  0x00010000UL
+++#define CKF_WRAP               0x00020000UL
+++#define CKF_UNWRAP             0x00040000UL
+++#define CKF_DERIVE             0x00080000UL
+++
+++/* CKF_EC_F_P, CKF_EC_F_2M, CKF_EC_ECPARAMETERS, CKF_EC_NAMEDCURVE,
+++ * CKF_EC_UNCOMPRESS, and CKF_EC_COMPRESS are new for v2.11. They
+++ * describe a token's EC capabilities not available in mechanism
+++ * information.
*/ +++#define CKF_EC_F_P 0x00100000UL +++#define CKF_EC_F_2M 0x00200000UL +++#define CKF_EC_ECPARAMETERS 0x00400000UL +++#define CKF_EC_OID 0x00800000UL +++#define CKF_EC_NAMEDCURVE CKF_EC_OID /* renamed in v3.0 */ +++#define CKF_EC_UNCOMPRESS 0x01000000UL +++#define CKF_EC_COMPRESS 0x02000000UL ++ ++-#define CKF_EXTENSION 0x80000000UL +++#define CKF_EXTENSION 0x80000000UL /* FALSE for this version */ ++ ++ typedef CK_MECHANISM_INFO CK_PTR CK_MECHANISM_INFO_PTR; ++ ++ /* CK_RV is a value that identifies the return value of a ++- * Cryptoki function ++- */ ++-typedef CK_ULONG CK_RV; ++- ++-#define CKR_OK 0x00000000UL ++-#define CKR_CANCEL 0x00000001UL ++-#define CKR_HOST_MEMORY 0x00000002UL ++-#define CKR_SLOT_ID_INVALID 0x00000003UL ++- ++-#define CKR_GENERAL_ERROR 0x00000005UL ++-#define CKR_FUNCTION_FAILED 0x00000006UL ++- ++-#define CKR_ARGUMENTS_BAD 0x00000007UL ++-#define CKR_NO_EVENT 0x00000008UL ++-#define CKR_NEED_TO_CREATE_THREADS 0x00000009UL ++-#define CKR_CANT_LOCK 0x0000000AUL ++- ++-#define CKR_ATTRIBUTE_READ_ONLY 0x00000010UL ++-#define CKR_ATTRIBUTE_SENSITIVE 0x00000011UL ++-#define CKR_ATTRIBUTE_TYPE_INVALID 0x00000012UL ++-#define CKR_ATTRIBUTE_VALUE_INVALID 0x00000013UL ++- ++-#define CKR_ACTION_PROHIBITED 0x0000001BUL ++- ++-#define CKR_DATA_INVALID 0x00000020UL ++-#define CKR_DATA_LEN_RANGE 0x00000021UL ++-#define CKR_DEVICE_ERROR 0x00000030UL ++-#define CKR_DEVICE_MEMORY 0x00000031UL ++-#define CKR_DEVICE_REMOVED 0x00000032UL ++-#define CKR_ENCRYPTED_DATA_INVALID 0x00000040UL ++-#define CKR_ENCRYPTED_DATA_LEN_RANGE 0x00000041UL ++-#define CKR_AEAD_DECRYPT_FAILED 0x00000042UL ++-#define CKR_FUNCTION_CANCELED 0x00000050UL ++-#define CKR_FUNCTION_NOT_PARALLEL 0x00000051UL ++- ++-#define CKR_FUNCTION_NOT_SUPPORTED 0x00000054UL ++- ++-#define CKR_KEY_HANDLE_INVALID 0x00000060UL ++- ++-#define CKR_KEY_SIZE_RANGE 0x00000062UL ++-#define CKR_KEY_TYPE_INCONSISTENT 0x00000063UL ++- ++-#define CKR_KEY_NOT_NEEDED 0x00000064UL ++-#define CKR_KEY_CHANGED 0x00000065UL ++-#define CKR_KEY_NEEDED 0x00000066UL ++-#define CKR_KEY_INDIGESTIBLE 0x00000067UL ++-#define CKR_KEY_FUNCTION_NOT_PERMITTED 0x00000068UL ++-#define CKR_KEY_NOT_WRAPPABLE 0x00000069UL ++-#define CKR_KEY_UNEXTRACTABLE 0x0000006AUL ++- ++-#define CKR_MECHANISM_INVALID 0x00000070UL ++-#define CKR_MECHANISM_PARAM_INVALID 0x00000071UL ++- ++-#define CKR_OBJECT_HANDLE_INVALID 0x00000082UL ++-#define CKR_OPERATION_ACTIVE 0x00000090UL ++-#define CKR_OPERATION_NOT_INITIALIZED 0x00000091UL ++-#define CKR_PIN_INCORRECT 0x000000A0UL ++-#define CKR_PIN_INVALID 0x000000A1UL ++-#define CKR_PIN_LEN_RANGE 0x000000A2UL ++- ++-#define CKR_PIN_EXPIRED 0x000000A3UL ++-#define CKR_PIN_LOCKED 0x000000A4UL ++- ++-#define CKR_SESSION_CLOSED 0x000000B0UL ++-#define CKR_SESSION_COUNT 0x000000B1UL ++-#define CKR_SESSION_HANDLE_INVALID 0x000000B3UL ++-#define CKR_SESSION_PARALLEL_NOT_SUPPORTED 0x000000B4UL ++-#define CKR_SESSION_READ_ONLY 0x000000B5UL ++-#define CKR_SESSION_EXISTS 0x000000B6UL ++- ++-#define CKR_SESSION_READ_ONLY_EXISTS 0x000000B7UL ++-#define CKR_SESSION_READ_WRITE_SO_EXISTS 0x000000B8UL ++- ++-#define CKR_SIGNATURE_INVALID 0x000000C0UL ++-#define CKR_SIGNATURE_LEN_RANGE 0x000000C1UL ++-#define CKR_TEMPLATE_INCOMPLETE 0x000000D0UL ++-#define CKR_TEMPLATE_INCONSISTENT 0x000000D1UL ++-#define CKR_TOKEN_NOT_PRESENT 0x000000E0UL ++-#define CKR_TOKEN_NOT_RECOGNIZED 0x000000E1UL ++-#define CKR_TOKEN_WRITE_PROTECTED 0x000000E2UL ++-#define CKR_UNWRAPPING_KEY_HANDLE_INVALID 0x000000F0UL ++-#define CKR_UNWRAPPING_KEY_SIZE_RANGE 
0x000000F1UL ++-#define CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT 0x000000F2UL ++-#define CKR_USER_ALREADY_LOGGED_IN 0x00000100UL ++-#define CKR_USER_NOT_LOGGED_IN 0x00000101UL ++-#define CKR_USER_PIN_NOT_INITIALIZED 0x00000102UL ++-#define CKR_USER_TYPE_INVALID 0x00000103UL ++- ++-#define CKR_USER_ANOTHER_ALREADY_LOGGED_IN 0x00000104UL ++-#define CKR_USER_TOO_MANY_TYPES 0x00000105UL ++- ++-#define CKR_WRAPPED_KEY_INVALID 0x00000110UL ++-#define CKR_WRAPPED_KEY_LEN_RANGE 0x00000112UL ++-#define CKR_WRAPPING_KEY_HANDLE_INVALID 0x00000113UL ++-#define CKR_WRAPPING_KEY_SIZE_RANGE 0x00000114UL ++-#define CKR_WRAPPING_KEY_TYPE_INCONSISTENT 0x00000115UL ++-#define CKR_RANDOM_SEED_NOT_SUPPORTED 0x00000120UL ++- ++-#define CKR_RANDOM_NO_RNG 0x00000121UL ++- ++-#define CKR_DOMAIN_PARAMS_INVALID 0x00000130UL ++- ++-#define CKR_CURVE_NOT_SUPPORTED 0x00000140UL ++- ++-#define CKR_BUFFER_TOO_SMALL 0x00000150UL ++-#define CKR_SAVED_STATE_INVALID 0x00000160UL ++-#define CKR_INFORMATION_SENSITIVE 0x00000170UL ++-#define CKR_STATE_UNSAVEABLE 0x00000180UL ++- ++-#define CKR_CRYPTOKI_NOT_INITIALIZED 0x00000190UL ++-#define CKR_CRYPTOKI_ALREADY_INITIALIZED 0x00000191UL ++-#define CKR_MUTEX_BAD 0x000001A0UL ++-#define CKR_MUTEX_NOT_LOCKED 0x000001A1UL ++- ++-#define CKR_NEW_PIN_MODE 0x000001B0UL ++-#define CKR_NEXT_OTP 0x000001B1UL ++- ++-#define CKR_EXCEEDED_MAX_ITERATIONS 0x000001B5UL ++-#define CKR_FIPS_SELF_TEST_FAILED 0x000001B6UL ++-#define CKR_LIBRARY_LOAD_FAILED 0x000001B7UL ++-#define CKR_PIN_TOO_WEAK 0x000001B8UL ++-#define CKR_PUBLIC_KEY_INVALID 0x000001B9UL ++- ++-#define CKR_FUNCTION_REJECTED 0x00000200UL ++-#define CKR_TOKEN_RESOURCE_EXCEEDED 0x00000201UL ++-#define CKR_OPERATION_CANCEL_FAILED 0x00000202UL ++- ++-#define CKR_VENDOR_DEFINED 0x80000000UL +++ * PKCS #11 function */ +++/* CK_RV was changed from CK_USHORT to CK_ULONG for v2.0 */ +++typedef CK_ULONG CK_RV; +++ +++#define CKR_OK 0x00000000UL +++#define CKR_CANCEL 0x00000001UL +++#define CKR_HOST_MEMORY 0x00000002UL +++#define CKR_SLOT_ID_INVALID 0x00000003UL +++ +++/* CKR_FLAGS_INVALID was removed for v2.0 */ +++ +++/* CKR_GENERAL_ERROR and CKR_FUNCTION_FAILED are new for v2.0 */ +++#define CKR_GENERAL_ERROR 0x00000005UL +++#define CKR_FUNCTION_FAILED 0x00000006UL +++ +++/* CKR_ARGUMENTS_BAD, CKR_NO_EVENT, CKR_NEED_TO_CREATE_THREADS, +++ * and CKR_CANT_LOCK are new for v2.01 */ +++#define CKR_ARGUMENTS_BAD 0x00000007UL +++#define CKR_NO_EVENT 0x00000008UL +++#define CKR_NEED_TO_CREATE_THREADS 0x00000009UL +++#define CKR_CANT_LOCK 0x0000000AUL +++ +++#define CKR_ATTRIBUTE_READ_ONLY 0x00000010UL +++#define CKR_ATTRIBUTE_SENSITIVE 0x00000011UL +++#define CKR_ATTRIBUTE_TYPE_INVALID 0x00000012UL +++#define CKR_ATTRIBUTE_VALUE_INVALID 0x00000013UL +++ +++/* new for v3.0 */ +++#define CKR_ACTION_PROHIBITED 0x0000001BUL +++ +++#define CKR_DATA_INVALID 0x00000020UL +++#define CKR_DATA_LEN_RANGE 0x00000021UL +++#define CKR_DEVICE_ERROR 0x00000030UL +++#define CKR_DEVICE_MEMORY 0x00000031UL +++#define CKR_DEVICE_REMOVED 0x00000032UL +++#define CKR_ENCRYPTED_DATA_INVALID 0x00000040UL +++#define CKR_ENCRYPTED_DATA_LEN_RANGE 0x00000041UL +++#define CKR_FUNCTION_CANCELED 0x00000050UL +++#define CKR_FUNCTION_NOT_PARALLEL 0x00000051UL +++ +++/* CKR_FUNCTION_NOT_SUPPORTED is new for v2.0 */ +++#define CKR_FUNCTION_NOT_SUPPORTED 0x00000054UL +++ +++#define CKR_KEY_HANDLE_INVALID 0x00000060UL +++ +++/* CKR_KEY_SENSITIVE was removed for v2.0 */ +++ +++#define CKR_KEY_SIZE_RANGE 0x00000062UL +++#define CKR_KEY_TYPE_INCONSISTENT 0x00000063UL +++ +++/* 
CKR_KEY_NOT_NEEDED, CKR_KEY_CHANGED, CKR_KEY_NEEDED, +++ * CKR_KEY_INDIGESTIBLE, CKR_KEY_FUNCTION_NOT_PERMITTED, +++ * CKR_KEY_NOT_WRAPPABLE, and CKR_KEY_UNEXTRACTABLE are new for +++ * v2.0 */ +++#define CKR_KEY_NOT_NEEDED 0x00000064UL +++#define CKR_KEY_CHANGED 0x00000065UL +++#define CKR_KEY_NEEDED 0x00000066UL +++#define CKR_KEY_INDIGESTIBLE 0x00000067UL +++#define CKR_KEY_FUNCTION_NOT_PERMITTED 0x00000068UL +++#define CKR_KEY_NOT_WRAPPABLE 0x00000069UL +++#define CKR_KEY_UNEXTRACTABLE 0x0000006AUL +++ +++#define CKR_MECHANISM_INVALID 0x00000070UL +++#define CKR_MECHANISM_PARAM_INVALID 0x00000071UL +++ +++/* CKR_OBJECT_CLASS_INCONSISTENT and CKR_OBJECT_CLASS_INVALID +++ * were removed for v2.0 */ +++#define CKR_OBJECT_HANDLE_INVALID 0x00000082UL +++#define CKR_OPERATION_ACTIVE 0x00000090UL +++#define CKR_OPERATION_NOT_INITIALIZED 0x00000091UL +++#define CKR_PIN_INCORRECT 0x000000A0UL +++#define CKR_PIN_INVALID 0x000000A1UL +++#define CKR_PIN_LEN_RANGE 0x000000A2UL +++ +++/* CKR_PIN_EXPIRED and CKR_PIN_LOCKED are new for v2.0 */ +++#define CKR_PIN_EXPIRED 0x000000A3UL +++#define CKR_PIN_LOCKED 0x000000A4UL +++ +++#define CKR_SESSION_CLOSED 0x000000B0UL +++#define CKR_SESSION_COUNT 0x000000B1UL +++#define CKR_SESSION_HANDLE_INVALID 0x000000B3UL +++#define CKR_SESSION_PARALLEL_NOT_SUPPORTED 0x000000B4UL +++#define CKR_SESSION_READ_ONLY 0x000000B5UL +++#define CKR_SESSION_EXISTS 0x000000B6UL +++ +++/* CKR_SESSION_READ_ONLY_EXISTS and +++ * CKR_SESSION_READ_WRITE_SO_EXISTS are new for v2.0 */ +++#define CKR_SESSION_READ_ONLY_EXISTS 0x000000B7UL +++#define CKR_SESSION_READ_WRITE_SO_EXISTS 0x000000B8UL +++ +++#define CKR_SIGNATURE_INVALID 0x000000C0UL +++#define CKR_SIGNATURE_LEN_RANGE 0x000000C1UL +++#define CKR_TEMPLATE_INCOMPLETE 0x000000D0UL +++#define CKR_TEMPLATE_INCONSISTENT 0x000000D1UL +++#define CKR_TOKEN_NOT_PRESENT 0x000000E0UL +++#define CKR_TOKEN_NOT_RECOGNIZED 0x000000E1UL +++#define CKR_TOKEN_WRITE_PROTECTED 0x000000E2UL +++#define CKR_UNWRAPPING_KEY_HANDLE_INVALID 0x000000F0UL +++#define CKR_UNWRAPPING_KEY_SIZE_RANGE 0x000000F1UL +++#define CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT 0x000000F2UL +++#define CKR_USER_ALREADY_LOGGED_IN 0x00000100UL +++#define CKR_USER_NOT_LOGGED_IN 0x00000101UL +++#define CKR_USER_PIN_NOT_INITIALIZED 0x00000102UL +++#define CKR_USER_TYPE_INVALID 0x00000103UL +++ +++/* CKR_USER_ANOTHER_ALREADY_LOGGED_IN and CKR_USER_TOO_MANY_TYPES +++ * are new to v2.01 */ +++#define CKR_USER_ANOTHER_ALREADY_LOGGED_IN 0x00000104UL +++#define CKR_USER_TOO_MANY_TYPES 0x00000105UL +++ +++#define CKR_WRAPPED_KEY_INVALID 0x00000110UL +++#define CKR_WRAPPED_KEY_LEN_RANGE 0x00000112UL +++#define CKR_WRAPPING_KEY_HANDLE_INVALID 0x00000113UL +++#define CKR_WRAPPING_KEY_SIZE_RANGE 0x00000114UL +++#define CKR_WRAPPING_KEY_TYPE_INCONSISTENT 0x00000115UL +++#define CKR_RANDOM_SEED_NOT_SUPPORTED 0x00000120UL +++ +++/* This is new to v2.0 */ +++#define CKR_RANDOM_NO_RNG 0x00000121UL +++ +++/* This is new to v2.11 */ +++#define CKR_DOMAIN_PARAMS_INVALID 0x00000130UL +++ +++/* This is new to v2.40 */ +++#define CKR_CURVE_NOT_SUPPORTED 0x00000140UL +++ +++/* These are new to v2.0 */ +++#define CKR_BUFFER_TOO_SMALL 0x00000150UL +++#define CKR_SAVED_STATE_INVALID 0x00000160UL +++#define CKR_INFORMATION_SENSITIVE 0x00000170UL +++#define CKR_STATE_UNSAVEABLE 0x00000180UL +++ +++/* These are new to v2.01 */ +++#define CKR_CRYPTOKI_NOT_INITIALIZED 0x00000190UL +++#define CKR_CRYPTOKI_ALREADY_INITIALIZED 0x00000191UL +++#define CKR_MUTEX_BAD 0x000001A0UL +++#define CKR_MUTEX_NOT_LOCKED 
0x000001A1UL +++ +++/* These are new to v2.40 */ +++#define CKR_NEW_PIN_MODE 0x000001B0UL +++#define CKR_NEXT_OTP 0x000001B1UL +++#define CKR_EXCEEDED_MAX_ITERATIONS 0x000001B5UL +++#define CKR_FIPS_SELF_TEST_FAILED 0x000001B6UL +++#define CKR_LIBRARY_LOAD_FAILED 0x000001B7UL +++#define CKR_PIN_TOO_WEAK 0x000001B8UL +++#define CKR_PUBLIC_KEY_INVALID 0x000001B9UL +++ +++/* This is new to v2.20 */ +++#define CKR_FUNCTION_REJECTED 0x00000200UL +++ +++/* This is new to v3.0 */ +++#define CKR_TOKEN_RESOURCE_EXCEEDED 0x00000201UL +++#define CKR_OPERATION_CANCEL_FAILED 0x00000202UL ++ +++#define CKR_VENDOR_DEFINED 0x80000000UL ++ ++ /* CK_NOTIFY is an application callback that processes events */ ++ typedef CK_CALLBACK_FUNCTION(CK_RV, CK_NOTIFY)( ++- CK_SESSION_HANDLE hSession, /* the session's handle */ ++- CK_NOTIFICATION event, ++- CK_VOID_PTR pApplication /* passed to C_OpenSession */ ++-); +++ CK_SESSION_HANDLE hSession, /* the session's handle */ +++ CK_NOTIFICATION event, +++ CK_VOID_PTR pApplication /* passed to C_OpenSession */ +++ ); ++ ++- ++-/* CK_FUNCTION_LIST is a structure holding a Cryptoki spec +++/* CK_FUNCTION_LIST is a structure holding a PKCS #11 spec ++ * version and pointers of appropriate types to all the ++- * Cryptoki functions ++- */ +++ * PKCS #11 functions */ +++/* CK_FUNCTION_LIST is new for v2.0 */ ++ typedef struct CK_FUNCTION_LIST CK_FUNCTION_LIST; ++-typedef struct CK_FUNCTION_LIST_3_0 CK_FUNCTION_LIST_3_0; ++ ++ typedef CK_FUNCTION_LIST CK_PTR CK_FUNCTION_LIST_PTR; ++-typedef CK_FUNCTION_LIST_3_0 CK_PTR CK_FUNCTION_LIST_3_0_PTR; ++ ++ typedef CK_FUNCTION_LIST_PTR CK_PTR CK_FUNCTION_LIST_PTR_PTR; +++ +++/* These are new for v3.0 */ +++typedef struct CK_FUNCTION_LIST_3_0 CK_FUNCTION_LIST_3_0; +++typedef CK_FUNCTION_LIST_3_0 CK_PTR CK_FUNCTION_LIST_3_0_PTR; ++ typedef CK_FUNCTION_LIST_3_0_PTR CK_PTR CK_FUNCTION_LIST_3_0_PTR_PTR; ++ +++/* Interfaces are new in v3.0 */ ++ typedef struct CK_INTERFACE { ++- CK_CHAR *pInterfaceName; +++ CK_CHAR *pInterfaceName; ++ CK_VOID_PTR pFunctionList; ++- CK_FLAGS flags; +++ CK_FLAGS flags; ++ } CK_INTERFACE; ++ ++ typedef CK_INTERFACE CK_PTR CK_INTERFACE_PTR; ++ typedef CK_INTERFACE_PTR CK_PTR CK_INTERFACE_PTR_PTR; ++ ++-#define CKF_END_OF_MESSAGE 0x00000001UL ++- +++#define CKF_END_OF_MESSAGE 0x00000001UL +++#define CKF_INTERFACE_FORK_SAFE 0x00000001UL ++ ++ /* CK_CREATEMUTEX is an application callback for creating a ++- * mutex object ++- */ +++ * mutex object */ ++ typedef CK_CALLBACK_FUNCTION(CK_RV, CK_CREATEMUTEX)( ++- CK_VOID_PTR_PTR ppMutex /* location to receive ptr to mutex */ ++-); ++- +++ CK_VOID_PTR_PTR ppMutex /* location to receive ptr to mutex */ +++ ); ++ ++ /* CK_DESTROYMUTEX is an application callback for destroying a ++- * mutex object ++- */ +++ * mutex object */ ++ typedef CK_CALLBACK_FUNCTION(CK_RV, CK_DESTROYMUTEX)( ++- CK_VOID_PTR pMutex /* pointer to mutex */ ++-); ++- +++ CK_VOID_PTR pMutex /* pointer to mutex */ +++ ); ++ ++ /* CK_LOCKMUTEX is an application callback for locking a mutex */ ++ typedef CK_CALLBACK_FUNCTION(CK_RV, CK_LOCKMUTEX)( ++- CK_VOID_PTR pMutex /* pointer to mutex */ ++-); ++- +++ CK_VOID_PTR pMutex /* pointer to mutex */ +++ ); ++ ++ /* CK_UNLOCKMUTEX is an application callback for unlocking a ++- * mutex ++- */ +++ * mutex */ ++ typedef CK_CALLBACK_FUNCTION(CK_RV, CK_UNLOCKMUTEX)( ++- CK_VOID_PTR pMutex /* pointer to mutex */ ++-); ++- ++-/* Get functionlist flags */ ++-#define CKF_INTERFACE_FORK_SAFE 0x00000001UL +++ CK_VOID_PTR pMutex /* pointer to mutex */ +++ ); ++ ++ /* 
CK_C_INITIALIZE_ARGS provides the optional arguments to ++- * C_Initialize ++- */ +++ * C_Initialize */ ++ typedef struct CK_C_INITIALIZE_ARGS { ++- CK_CREATEMUTEX CreateMutex; +++ CK_CREATEMUTEX CreateMutex; ++ CK_DESTROYMUTEX DestroyMutex; ++- CK_LOCKMUTEX LockMutex; ++- CK_UNLOCKMUTEX UnlockMutex; ++- CK_FLAGS flags; ++- CK_VOID_PTR pReserved; +++ CK_LOCKMUTEX LockMutex; +++ CK_UNLOCKMUTEX UnlockMutex; +++ CK_FLAGS flags; +++ /* The official PKCS #11 spec does not have a 'LibraryParameters' field, but +++ * a reserved field. NSS needs a way to pass instance-specific information +++ * to the library (like where to find its config files, etc). This +++ * information is usually provided by the installer and passed uninterpreted +++ * by NSS to the library, though NSS does know the specifics of the softoken +++ * version of this parameter. Most compliant PKCS#11 modules expect this +++ * parameter to be NULL, and will return CKR_ARGUMENTS_BAD from +++ * C_Initialize if Library parameters is supplied. */ +++ CK_CHAR_PTR *LibraryParameters; +++ /* This field is only present if the LibraryParameters is not NULL. It must +++ * be NULL in all cases */ +++ CK_VOID_PTR pReserved; ++ } CK_C_INITIALIZE_ARGS; ++ ++ /* flags: bit flags that provide capabilities of the slot ++ * Bit Flag Mask Meaning ++ */ ++ #define CKF_LIBRARY_CANT_CREATE_OS_THREADS 0x00000001UL ++-#define CKF_OS_LOCKING_OK 0x00000002UL +++#define CKF_OS_LOCKING_OK 0x00000002UL ++ ++ typedef CK_C_INITIALIZE_ARGS CK_PTR CK_C_INITIALIZE_ARGS_PTR; ++ ++- ++- ++ /* additional flags for parameters to functions */ ++ ++ /* CKF_DONT_BLOCK is for the function C_WaitForSlotEvent */ ++-#define CKF_DONT_BLOCK 1 +++#define CKF_DONT_BLOCK 1 ++ ++-/* CK_RSA_PKCS_MGF_TYPE is used to indicate the Message +++/* CK_RSA_PKCS_OAEP_MGF_TYPE is new for v2.10. +++ * CK_RSA_PKCS_OAEP_MGF_TYPE is used to indicate the Message ++ * Generation Function (MGF) applied to a message block when ++ * formatting a message block for the PKCS #1 OAEP encryption ++- * scheme. ++- */ +++ * scheme. */ ++ typedef CK_ULONG CK_RSA_PKCS_MGF_TYPE; ++ ++ typedef CK_RSA_PKCS_MGF_TYPE CK_PTR CK_RSA_PKCS_MGF_TYPE_PTR; ++ ++ /* The following MGFs are defined */ ++-#define CKG_MGF1_SHA1 0x00000001UL ++-#define CKG_MGF1_SHA256 0x00000002UL ++-#define CKG_MGF1_SHA384 0x00000003UL ++-#define CKG_MGF1_SHA512 0x00000004UL ++-#define CKG_MGF1_SHA224 0x00000005UL ++-#define CKG_MGF1_SHA3_224 0x00000006UL ++-#define CKG_MGF1_SHA3_256 0x00000007UL ++-#define CKG_MGF1_SHA3_384 0x00000008UL ++-#define CKG_MGF1_SHA3_512 0x00000009UL +++/* CKG_MGF1_SHA256, CKG_MGF1_SHA384, and CKG_MGF1_SHA512 +++ * are new for v2.20 */ +++#define CKG_MGF1_SHA1 0x00000001UL +++#define CKG_MGF1_SHA256 0x00000002UL +++#define CKG_MGF1_SHA384 0x00000003UL +++#define CKG_MGF1_SHA512 0x00000004UL +++ +++/* v2.20 amendment 3 */ +++#define CKG_MGF1_SHA224 0x00000005UL +++ +++/* v2.40 */ +++#define CKG_MGF1_SHA3_224 0x00000006UL +++#define CKG_MGF1_SHA3_256 0x00000007UL +++#define CKG_MGF1_SHA3_384 0x00000008UL +++#define CKG_MGF1_SHA3_512 0x00000009UL ++ ++-/* CK_RSA_PKCS_OAEP_SOURCE_TYPE is used to indicate the source +++/* CK_RSA_PKCS_OAEP_SOURCE_TYPE is new for v2.10. +++ * CK_RSA_PKCS_OAEP_SOURCE_TYPE is used to indicate the source ++ * of the encoding parameter when formatting a message block ++- * for the PKCS #1 OAEP encryption scheme. ++- */ +++ * for the PKCS #1 OAEP encryption scheme. 
*/ ++ typedef CK_ULONG CK_RSA_PKCS_OAEP_SOURCE_TYPE; ++ ++ typedef CK_RSA_PKCS_OAEP_SOURCE_TYPE CK_PTR CK_RSA_PKCS_OAEP_SOURCE_TYPE_PTR; ++ ++ /* The following encoding parameter sources are defined */ ++-#define CKZ_DATA_SPECIFIED 0x00000001UL +++#define CKZ_DATA_SPECIFIED 0x00000001UL ++ ++-/* CK_RSA_PKCS_OAEP_PARAMS provides the parameters to the ++- * CKM_RSA_PKCS_OAEP mechanism. ++- */ +++/* CK_RSA_PKCS_OAEP_PARAMS is new for v2.10. +++ * CK_RSA_PKCS_OAEP_PARAMS provides the parameters to the +++ * CKM_RSA_PKCS_OAEP mechanism. */ ++ typedef struct CK_RSA_PKCS_OAEP_PARAMS { ++- CK_MECHANISM_TYPE hashAlg; ++- CK_RSA_PKCS_MGF_TYPE mgf; +++ CK_MECHANISM_TYPE hashAlg; +++ CK_RSA_PKCS_MGF_TYPE mgf; ++ CK_RSA_PKCS_OAEP_SOURCE_TYPE source; ++- CK_VOID_PTR pSourceData; ++- CK_ULONG ulSourceDataLen; +++ CK_VOID_PTR pSourceData; +++ CK_ULONG ulSourceDataLen; ++ } CK_RSA_PKCS_OAEP_PARAMS; ++ ++ typedef CK_RSA_PKCS_OAEP_PARAMS CK_PTR CK_RSA_PKCS_OAEP_PARAMS_PTR; ++ ++-/* CK_RSA_PKCS_PSS_PARAMS provides the parameters to the ++- * CKM_RSA_PKCS_PSS mechanism(s). ++- */ +++/* CK_RSA_PKCS_PSS_PARAMS is new for v2.11. +++ * CK_RSA_PKCS_PSS_PARAMS provides the parameters to the +++ * CKM_RSA_PKCS_PSS mechanism(s). */ ++ typedef struct CK_RSA_PKCS_PSS_PARAMS { ++- CK_MECHANISM_TYPE hashAlg; +++ CK_MECHANISM_TYPE hashAlg; ++ CK_RSA_PKCS_MGF_TYPE mgf; ++- CK_ULONG sLen; +++ CK_ULONG sLen; ++ } CK_RSA_PKCS_PSS_PARAMS; ++ ++ typedef CK_RSA_PKCS_PSS_PARAMS CK_PTR CK_RSA_PKCS_PSS_PARAMS_PTR; ++ +++/* CK_EC_KDF_TYPE is new for v2.11. */ ++ typedef CK_ULONG CK_EC_KDF_TYPE; ++-typedef CK_EC_KDF_TYPE CK_PTR CK_EC_KDF_TYPE_PTR; ++ ++ /* The following EC Key Derivation Functions are defined */ ++-#define CKD_NULL 0x00000001UL ++-#define CKD_SHA1_KDF 0x00000002UL ++- ++-/* The following X9.42 DH key derivation functions are defined */ ++-#define CKD_SHA1_KDF_ASN1 0x00000003UL ++-#define CKD_SHA1_KDF_CONCATENATE 0x00000004UL ++-#define CKD_SHA224_KDF 0x00000005UL ++-#define CKD_SHA256_KDF 0x00000006UL ++-#define CKD_SHA384_KDF 0x00000007UL ++-#define CKD_SHA512_KDF 0x00000008UL ++-#define CKD_CPDIVERSIFY_KDF 0x00000009UL ++-#define CKD_SHA3_224_KDF 0x0000000AUL ++-#define CKD_SHA3_256_KDF 0x0000000BUL ++-#define CKD_SHA3_384_KDF 0x0000000CUL ++-#define CKD_SHA3_512_KDF 0x0000000DUL ++-#define CKD_SHA1_KDF_SP800 0x0000000EUL ++-#define CKD_SHA224_KDF_SP800 0x0000000FUL ++-#define CKD_SHA256_KDF_SP800 0x00000010UL ++-#define CKD_SHA384_KDF_SP800 0x00000011UL ++-#define CKD_SHA512_KDF_SP800 0x00000012UL ++-#define CKD_SHA3_224_KDF_SP800 0x00000013UL ++-#define CKD_SHA3_256_KDF_SP800 0x00000014UL ++-#define CKD_SHA3_384_KDF_SP800 0x00000015UL ++-#define CKD_SHA3_512_KDF_SP800 0x00000016UL ++-#define CKD_BLAKE2B_160_KDF 0x00000017UL ++-#define CKD_BLAKE2B_256_KDF 0x00000018UL ++-#define CKD_BLAKE2B_384_KDF 0x00000019UL ++-#define CKD_BLAKE2B_512_KDF 0x0000001aUL +++#define CKD_NULL 0x00000001UL +++#define CKD_SHA1_KDF 0x00000002UL +++#define CKD_SHA224_KDF 0x00000005UL +++#define CKD_SHA256_KDF 0x00000006UL +++#define CKD_SHA384_KDF 0x00000007UL +++#define CKD_SHA512_KDF 0x00000008UL +++ +++/* new for v2.40 */ +++#define CKD_CPDIVERSIFY_KDF 0x00000009UL +++#define CKD_SHA3_224_KDF 0x0000000AUL +++#define CKD_SHA3_256_KDF 0x0000000BUL +++#define CKD_SHA3_384_KDF 0x0000000CUL +++#define CKD_SHA3_512_KDF 0x0000000DUL +++ +++/* new for v3.0 */ +++#define CKD_SHA1_KDF_SP800 0x0000000EUL +++#define CKD_SHA224_KDF_SP800 0x0000000FUL +++#define CKD_SHA256_KDF_SP800 0x00000010UL +++#define CKD_SHA384_KDF_SP800 0x00000011UL 
+++#define CKD_SHA512_KDF_SP800 0x00000012UL
+++#define CKD_SHA3_224_KDF_SP800 0x00000013UL
+++#define CKD_SHA3_256_KDF_SP800 0x00000014UL
+++#define CKD_SHA3_384_KDF_SP800 0x00000015UL
+++#define CKD_SHA3_512_KDF_SP800 0x00000016UL
+++#define CKD_BLAKE2B_160_KDF 0x00000017UL
+++#define CKD_BLAKE2B_256_KDF 0x00000018UL
+++#define CKD_BLAKE2B_384_KDF 0x00000019UL
+++#define CKD_BLAKE2B_512_KDF 0x0000001aUL
++
++-/* CK_ECDH1_DERIVE_PARAMS provides the parameters to the
+++/* CK_ECDH1_DERIVE_PARAMS is new for v2.11.
+++ * CK_ECDH1_DERIVE_PARAMS provides the parameters to the
++ * CKM_ECDH1_DERIVE and CKM_ECDH1_COFACTOR_DERIVE mechanisms,
++ * where each party contributes one key pair.
++ */
++ typedef struct CK_ECDH1_DERIVE_PARAMS {
++ CK_EC_KDF_TYPE kdf;
++- CK_ULONG ulSharedDataLen;
++- CK_BYTE_PTR pSharedData;
++- CK_ULONG ulPublicDataLen;
++- CK_BYTE_PTR pPublicData;
+++ CK_ULONG ulSharedDataLen;
+++ CK_BYTE_PTR pSharedData;
+++ CK_ULONG ulPublicDataLen;
+++ CK_BYTE_PTR pPublicData;
++ } CK_ECDH1_DERIVE_PARAMS;
++
++ typedef CK_ECDH1_DERIVE_PARAMS CK_PTR CK_ECDH1_DERIVE_PARAMS_PTR;
++
++-/*
+++/* CK_ECDH2_DERIVE_PARAMS is new for v2.11.
++ * CK_ECDH2_DERIVE_PARAMS provides the parameters to the
++- * CKM_ECMQV_DERIVE mechanism, where each party contributes two key pairs.
++- */
+++ * CKM_ECMQV_DERIVE mechanism, where each party contributes two key pairs. */
++ typedef struct CK_ECDH2_DERIVE_PARAMS {
++- CK_EC_KDF_TYPE kdf;
++- CK_ULONG ulSharedDataLen;
++- CK_BYTE_PTR pSharedData;
++- CK_ULONG ulPublicDataLen;
++- CK_BYTE_PTR pPublicData;
++- CK_ULONG ulPrivateDataLen;
+++ CK_EC_KDF_TYPE kdf;
+++ CK_ULONG ulSharedDataLen;
+++ CK_BYTE_PTR pSharedData;
+++ CK_ULONG ulPublicDataLen;
+++ CK_BYTE_PTR pPublicData;
+++ CK_ULONG ulPrivateDataLen;
++ CK_OBJECT_HANDLE hPrivateData;
++- CK_ULONG ulPublicDataLen2;
++- CK_BYTE_PTR pPublicData2;
+++ CK_ULONG ulPublicDataLen2;
+++ CK_BYTE_PTR pPublicData2;
++ } CK_ECDH2_DERIVE_PARAMS;
++
++ typedef CK_ECDH2_DERIVE_PARAMS CK_PTR CK_ECDH2_DERIVE_PARAMS_PTR;
++
++ typedef struct CK_ECMQV_DERIVE_PARAMS {
++- CK_EC_KDF_TYPE kdf;
++- CK_ULONG ulSharedDataLen;
++- CK_BYTE_PTR pSharedData;
++- CK_ULONG ulPublicDataLen;
++- CK_BYTE_PTR pPublicData;
++- CK_ULONG ulPrivateDataLen;
+++ CK_EC_KDF_TYPE kdf;
+++ CK_ULONG ulSharedDataLen;
+++ CK_BYTE_PTR pSharedData;
+++ CK_ULONG ulPublicDataLen;
+++ CK_BYTE_PTR pPublicData;
+++ CK_ULONG ulPrivateDataLen;
++ CK_OBJECT_HANDLE hPrivateData;
++- CK_ULONG ulPublicDataLen2;
++- CK_BYTE_PTR pPublicData2;
+++ CK_ULONG ulPublicDataLen2;
+++ CK_BYTE_PTR pPublicData2;
++ CK_OBJECT_HANDLE publicKey;
++ } CK_ECMQV_DERIVE_PARAMS;
++
++ typedef CK_ECMQV_DERIVE_PARAMS CK_PTR CK_ECMQV_DERIVE_PARAMS_PTR;
++
++ /* Typedefs and defines for the CKM_X9_42_DH_KEY_PAIR_GEN and the
++- * CKM_X9_42_DH_PARAMETER_GEN mechanisms
++- */
+++ * CKM_X9_42_DH_PARAMETER_GEN mechanisms (new for PKCS #11 v2.11) */
++ typedef CK_ULONG CK_X9_42_DH_KDF_TYPE;
++ typedef CK_X9_42_DH_KDF_TYPE CK_PTR CK_X9_42_DH_KDF_TYPE_PTR;
++
++-/* CK_X9_42_DH1_DERIVE_PARAMS provides the parameters to the
+++/* The following X9.42 DH key derivation functions are defined
+++ (besides CKD_NULL, already defined): */
+++#define CKD_SHA1_KDF_ASN1 0x00000003UL
+++#define CKD_SHA1_KDF_CONCATENATE 0x00000004UL
+++
+++/* CK_X9_42_DH1_DERIVE_PARAMS is new for v2.11.
+++ * CK_X9_42_DH1_DERIVE_PARAMS provides the parameters to the ++ * CKM_X9_42_DH_DERIVE key derivation mechanism, where each party ++- * contributes one key pair ++- */ +++ * contributes one key pair */ ++ typedef struct CK_X9_42_DH1_DERIVE_PARAMS { ++ CK_X9_42_DH_KDF_TYPE kdf; ++- CK_ULONG ulOtherInfoLen; ++- CK_BYTE_PTR pOtherInfo; ++- CK_ULONG ulPublicDataLen; ++- CK_BYTE_PTR pPublicData; +++ CK_ULONG ulOtherInfoLen; +++ CK_BYTE_PTR pOtherInfo; +++ CK_ULONG ulPublicDataLen; +++ CK_BYTE_PTR pPublicData; ++ } CK_X9_42_DH1_DERIVE_PARAMS; ++ ++ typedef struct CK_X9_42_DH1_DERIVE_PARAMS CK_PTR CK_X9_42_DH1_DERIVE_PARAMS_PTR; ++ ++-/* CK_X9_42_DH2_DERIVE_PARAMS provides the parameters to the +++/* CK_X9_42_DH2_DERIVE_PARAMS is new for v2.11. +++ * CK_X9_42_DH2_DERIVE_PARAMS provides the parameters to the ++ * CKM_X9_42_DH_HYBRID_DERIVE and CKM_X9_42_MQV_DERIVE key derivation ++- * mechanisms, where each party contributes two key pairs ++- */ +++ * mechanisms, where each party contributes two key pairs */ ++ typedef struct CK_X9_42_DH2_DERIVE_PARAMS { ++ CK_X9_42_DH_KDF_TYPE kdf; ++- CK_ULONG ulOtherInfoLen; ++- CK_BYTE_PTR pOtherInfo; ++- CK_ULONG ulPublicDataLen; ++- CK_BYTE_PTR pPublicData; ++- CK_ULONG ulPrivateDataLen; ++- CK_OBJECT_HANDLE hPrivateData; ++- CK_ULONG ulPublicDataLen2; ++- CK_BYTE_PTR pPublicData2; +++ CK_ULONG ulOtherInfoLen; +++ CK_BYTE_PTR pOtherInfo; +++ CK_ULONG ulPublicDataLen; +++ CK_BYTE_PTR pPublicData; +++ CK_ULONG ulPrivateDataLen; +++ CK_OBJECT_HANDLE hPrivateData; +++ CK_ULONG ulPublicDataLen2; +++ CK_BYTE_PTR pPublicData2; ++ } CK_X9_42_DH2_DERIVE_PARAMS; ++ ++ typedef CK_X9_42_DH2_DERIVE_PARAMS CK_PTR CK_X9_42_DH2_DERIVE_PARAMS_PTR; ++ ++ typedef struct CK_X9_42_MQV_DERIVE_PARAMS { ++ CK_X9_42_DH_KDF_TYPE kdf; ++- CK_ULONG ulOtherInfoLen; ++- CK_BYTE_PTR pOtherInfo; ++- CK_ULONG ulPublicDataLen; ++- CK_BYTE_PTR pPublicData; ++- CK_ULONG ulPrivateDataLen; ++- CK_OBJECT_HANDLE hPrivateData; ++- CK_ULONG ulPublicDataLen2; ++- CK_BYTE_PTR pPublicData2; ++- CK_OBJECT_HANDLE publicKey; +++ CK_ULONG ulOtherInfoLen; +++ CK_BYTE_PTR pOtherInfo; +++ CK_ULONG ulPublicDataLen; +++ CK_BYTE_PTR pPublicData; +++ CK_ULONG ulPrivateDataLen; +++ CK_OBJECT_HANDLE hPrivateData; +++ CK_ULONG ulPublicDataLen2; +++ CK_BYTE_PTR pPublicData2; +++ CK_OBJECT_HANDLE publicKey; ++ } CK_X9_42_MQV_DERIVE_PARAMS; ++ ++ typedef CK_X9_42_MQV_DERIVE_PARAMS CK_PTR CK_X9_42_MQV_DERIVE_PARAMS_PTR; ++ ++ /* CK_KEA_DERIVE_PARAMS provides the parameters to the ++- * CKM_KEA_DERIVE mechanism ++- */ +++ * CKM_KEA_DERIVE mechanism */ +++/* CK_KEA_DERIVE_PARAMS is new for v2.0 */ ++ typedef struct CK_KEA_DERIVE_PARAMS { ++- CK_BBOOL isSender; ++- CK_ULONG ulRandomLen; +++ CK_BBOOL isSender; +++ CK_ULONG ulRandomLen; ++ CK_BYTE_PTR pRandomA; ++ CK_BYTE_PTR pRandomB; ++- CK_ULONG ulPublicDataLen; +++ CK_ULONG ulPublicDataLen; ++ CK_BYTE_PTR pPublicData; ++ } CK_KEA_DERIVE_PARAMS; ++ ++ typedef CK_KEA_DERIVE_PARAMS CK_PTR CK_KEA_DERIVE_PARAMS_PTR; ++ ++- ++ /* CK_RC2_PARAMS provides the parameters to the CKM_RC2_ECB and ++ * CKM_RC2_MAC mechanisms. 
An instance of CK_RC2_PARAMS just ++- * holds the effective keysize ++- */ ++-typedef CK_ULONG CK_RC2_PARAMS; +++ * holds the effective keysize */ +++typedef CK_ULONG CK_RC2_PARAMS; ++ ++ typedef CK_RC2_PARAMS CK_PTR CK_RC2_PARAMS_PTR; ++ ++- ++ /* CK_RC2_CBC_PARAMS provides the parameters to the CKM_RC2_CBC ++- * mechanism ++- */ +++ * mechanism */ ++ typedef struct CK_RC2_CBC_PARAMS { ++- CK_ULONG ulEffectiveBits; /* effective bits (1-1024) */ ++- CK_BYTE iv[8]; /* IV for CBC mode */ +++ /* ulEffectiveBits was changed from CK_USHORT to CK_ULONG for +++ * v2.0 */ +++ CK_ULONG ulEffectiveBits; /* effective bits (1-1024) */ +++ +++ CK_BYTE iv[8]; /* IV for CBC mode */ ++ } CK_RC2_CBC_PARAMS; ++ ++ typedef CK_RC2_CBC_PARAMS CK_PTR CK_RC2_CBC_PARAMS_PTR; ++ ++- ++ /* CK_RC2_MAC_GENERAL_PARAMS provides the parameters for the ++- * CKM_RC2_MAC_GENERAL mechanism ++- */ +++ * CKM_RC2_MAC_GENERAL mechanism */ +++/* CK_RC2_MAC_GENERAL_PARAMS is new for v2.0 */ ++ typedef struct CK_RC2_MAC_GENERAL_PARAMS { ++- CK_ULONG ulEffectiveBits; /* effective bits (1-1024) */ ++- CK_ULONG ulMacLength; /* Length of MAC in bytes */ +++ CK_ULONG ulEffectiveBits; /* effective bits (1-1024) */ +++ CK_ULONG ulMacLength; /* Length of MAC in bytes */ ++ } CK_RC2_MAC_GENERAL_PARAMS; ++ ++-typedef CK_RC2_MAC_GENERAL_PARAMS CK_PTR \ ++- CK_RC2_MAC_GENERAL_PARAMS_PTR; ++- +++typedef CK_RC2_MAC_GENERAL_PARAMS CK_PTR +++ CK_RC2_MAC_GENERAL_PARAMS_PTR; ++ ++ /* CK_RC5_PARAMS provides the parameters to the CKM_RC5_ECB and ++- * CKM_RC5_MAC mechanisms ++- */ +++ * CKM_RC5_MAC mechanisms */ +++/* CK_RC5_PARAMS is new for v2.0 */ ++ typedef struct CK_RC5_PARAMS { ++- CK_ULONG ulWordsize; /* wordsize in bits */ ++- CK_ULONG ulRounds; /* number of rounds */ +++ CK_ULONG ulWordsize; /* wordsize in bits */ +++ CK_ULONG ulRounds; /* number of rounds */ ++ } CK_RC5_PARAMS; ++ ++ typedef CK_RC5_PARAMS CK_PTR CK_RC5_PARAMS_PTR; ++ ++- ++ /* CK_RC5_CBC_PARAMS provides the parameters to the CKM_RC5_CBC ++- * mechanism ++- */ +++ * mechanism */ +++/* CK_RC5_CBC_PARAMS is new for v2.0 */ ++ typedef struct CK_RC5_CBC_PARAMS { ++- CK_ULONG ulWordsize; /* wordsize in bits */ ++- CK_ULONG ulRounds; /* number of rounds */ ++- CK_BYTE_PTR pIv; /* pointer to IV */ ++- CK_ULONG ulIvLen; /* length of IV in bytes */ +++ CK_ULONG ulWordsize; /* wordsize in bits */ +++ CK_ULONG ulRounds; /* number of rounds */ +++ CK_BYTE_PTR pIv; /* pointer to IV */ +++ CK_ULONG ulIvLen; /* length of IV in bytes */ ++ } CK_RC5_CBC_PARAMS; ++ ++ typedef CK_RC5_CBC_PARAMS CK_PTR CK_RC5_CBC_PARAMS_PTR; ++ ++- ++ /* CK_RC5_MAC_GENERAL_PARAMS provides the parameters for the ++- * CKM_RC5_MAC_GENERAL mechanism ++- */ +++ * CKM_RC5_MAC_GENERAL mechanism */ +++/* CK_RC5_MAC_GENERAL_PARAMS is new for v2.0 */ ++ typedef struct CK_RC5_MAC_GENERAL_PARAMS { ++- CK_ULONG ulWordsize; /* wordsize in bits */ ++- CK_ULONG ulRounds; /* number of rounds */ ++- CK_ULONG ulMacLength; /* Length of MAC in bytes */ +++ CK_ULONG ulWordsize; /* wordsize in bits */ +++ CK_ULONG ulRounds; /* number of rounds */ +++ CK_ULONG ulMacLength; /* Length of MAC in bytes */ ++ } CK_RC5_MAC_GENERAL_PARAMS; ++ ++-typedef CK_RC5_MAC_GENERAL_PARAMS CK_PTR \ ++- CK_RC5_MAC_GENERAL_PARAMS_PTR; +++typedef CK_RC5_MAC_GENERAL_PARAMS CK_PTR +++ CK_RC5_MAC_GENERAL_PARAMS_PTR; ++ ++ /* CK_MAC_GENERAL_PARAMS provides the parameters to most block ++ * ciphers' MAC_GENERAL mechanisms. 
Its value is the length of
++- * the MAC
++- */
++-typedef CK_ULONG CK_MAC_GENERAL_PARAMS;
+++ * the MAC */
+++/* CK_MAC_GENERAL_PARAMS is new for v2.0 */
+++typedef CK_ULONG CK_MAC_GENERAL_PARAMS;
++
++ typedef CK_MAC_GENERAL_PARAMS CK_PTR CK_MAC_GENERAL_PARAMS_PTR;
++
+++/* CK_DES/AES_ECB/CBC_ENCRYPT_DATA_PARAMS are new for v2.20 */
++ typedef struct CK_DES_CBC_ENCRYPT_DATA_PARAMS {
++- CK_BYTE iv[8];
+++ CK_BYTE iv[8];
++ CK_BYTE_PTR pData;
++- CK_ULONG length;
+++ CK_ULONG length;
++ } CK_DES_CBC_ENCRYPT_DATA_PARAMS;
++
++-typedef CK_DES_CBC_ENCRYPT_DATA_PARAMS CK_PTR \
++- CK_DES_CBC_ENCRYPT_DATA_PARAMS_PTR;
+++typedef CK_DES_CBC_ENCRYPT_DATA_PARAMS CK_PTR CK_DES_CBC_ENCRYPT_DATA_PARAMS_PTR;
++
++ typedef struct CK_AES_CBC_ENCRYPT_DATA_PARAMS {
++- CK_BYTE iv[16];
+++ CK_BYTE iv[16];
++ CK_BYTE_PTR pData;
++- CK_ULONG length;
+++ CK_ULONG length;
++ } CK_AES_CBC_ENCRYPT_DATA_PARAMS;
++
++-typedef CK_AES_CBC_ENCRYPT_DATA_PARAMS CK_PTR \
++- CK_AES_CBC_ENCRYPT_DATA_PARAMS_PTR;
+++typedef CK_AES_CBC_ENCRYPT_DATA_PARAMS CK_PTR CK_AES_CBC_ENCRYPT_DATA_PARAMS_PTR;
+++
+++/* CK_AES_CTR_PARAMS is new for PKCS #11 v2.20 amendment 3 */
+++typedef struct CK_AES_CTR_PARAMS {
+++ CK_ULONG ulCounterBits;
+++ CK_BYTE cb[16];
+++} CK_AES_CTR_PARAMS;
+++
+++typedef CK_AES_CTR_PARAMS CK_PTR CK_AES_CTR_PARAMS_PTR;
+++
+++/* CK_GCM_PARAMS is new for version 2.30 */
+++/* There was a discrepancy between the doc and the headers
+++ * in PKCS #11 v2.40, NSS had the doc version, but the header
+++ * was normative. In V3.0 they were reconciled as the header
+++ * version. In NSS the header version is called CK_GCM_PARAMS_V3
+++ * and the v2.40 doc version is called CK_NSS_GCM_PARAMS.
+++ * CK_GCM_PARAMS is defined as CK_NSS_GCM_PARAMS if
+++ * NSS_PKCS11_2_0_COMPAT is defined and CK_GCM_PARAMS_V3 if it's not.
+++ * Softoken accepts either version and internally uses CK_NSS_GCM_PARAMS */
+++typedef struct CK_GCM_PARAMS_V3 {
+++ CK_BYTE_PTR pIv;
+++ CK_ULONG ulIvLen;
+++ CK_ULONG ulIvBits;
+++ CK_BYTE_PTR pAAD;
+++ CK_ULONG ulAADLen;
+++ CK_ULONG ulTagBits;
+++} CK_GCM_PARAMS_V3;
+++
+++typedef CK_GCM_PARAMS_V3 CK_PTR CK_GCM_PARAMS_V3_PTR;
+++
+++/* CK_CCM_PARAMS is new for version 2.30 */
+++typedef struct CK_CCM_PARAMS {
+++ CK_ULONG ulDataLen;
+++ CK_BYTE_PTR pNonce;
+++ CK_ULONG ulNonceLen;
+++ CK_BYTE_PTR pAAD;
+++ CK_ULONG ulAADLen;
+++ CK_ULONG ulMACLen;
+++} CK_CCM_PARAMS;
+++
+++typedef CK_CCM_PARAMS CK_PTR CK_CCM_PARAMS_PTR;
+++
+++/* SALSA20_POLY1305 and CHACHA20_POLY1305 AEAD are new in v3.0 */
+++typedef struct CK_SALSA20_CHACHA20_POLY1305_PARAMS {
+++ CK_BYTE_PTR pNonce;
+++ CK_ULONG ulNonceLen;
+++ CK_BYTE_PTR pAAD;
+++ CK_ULONG ulAADLen;
+++} CK_SALSA20_CHACHA20_POLY1305_PARAMS;
+++
+++typedef CK_SALSA20_CHACHA20_POLY1305_PARAMS
+++ CK_PTR CK_SALSA20_CHACHA20_POLY1305_PARAMS_PTR;
+++
+++/* MESSAGE params are new for v3.0 */
+++typedef CK_ULONG CK_GENERATOR_FUNCTION;
+++#define CKG_NO_GENERATE 0x00000000UL
+++#define CKG_GENERATE 0x00000001UL
+++#define CKG_GENERATE_COUNTER 0x00000002UL
+++#define CKG_GENERATE_RANDOM 0x00000003UL
+++#define CKG_GENERATE_COUNTER_XOR 0x00000004UL
+++
+++typedef struct CK_GCM_MESSAGE_PARAMS {
+++ CK_BYTE_PTR pIv;
+++ CK_ULONG ulIvLen;
+++ CK_ULONG ulIvFixedBits;
+++ CK_GENERATOR_FUNCTION ivGenerator;
+++ CK_BYTE_PTR pTag;
+++ CK_ULONG ulTagBits;
+++} CK_GCM_MESSAGE_PARAMS;
+++
+++typedef CK_GCM_MESSAGE_PARAMS CK_GCM_MESSAGE_PARAMS_PTR;
+++
+++typedef struct CK_CCM_MESSAGE_PARAMS {
+++ CK_ULONG ulDataLen; /*plaintext or ciphertext*/
+++ CK_BYTE_PTR pNonce;
+++ CK_ULONG ulNonceLen;
+++ CK_ULONG ulNonceFixedBits;
+++ CK_GENERATOR_FUNCTION nonceGenerator;
+++ CK_BYTE_PTR pMAC;
+++ CK_ULONG ulMACLen;
+++} CK_CCM_MESSAGE_PARAMS;
+++
+++typedef CK_CCM_MESSAGE_PARAMS CK_CCM_MESSAGE_PARAMS_PTR;
+++
+++/* SALSA20/CHACHA20 do not define IV generators */
+++typedef struct CK_SALSA20_CHACHA20_POLY1305_MSG_PARAMS {
+++ CK_BYTE_PTR pNonce;
+++ CK_ULONG ulNonceLen;
+++ CK_BYTE_PTR pTag;
+++} CK_SALSA20_CHACHA20_POLY1305_MSG_PARAMS;
+++
+++typedef CK_SALSA20_CHACHA20_POLY1305_MSG_PARAMS
+++ CK_PTR CK_SALSA20_CHACHA20_POLY1305_MSG_PARAMS_PTR;
++
++ /* CK_SKIPJACK_PRIVATE_WRAP_PARAMS provides the parameters to the
++- * CKM_SKIPJACK_PRIVATE_WRAP mechanism
++- */
+++ * CKM_SKIPJACK_PRIVATE_WRAP mechanism */
+++/* CK_SKIPJACK_PRIVATE_WRAP_PARAMS is new for v2.0 */
++ typedef struct CK_SKIPJACK_PRIVATE_WRAP_PARAMS {
++- CK_ULONG ulPasswordLen;
+++ CK_ULONG ulPasswordLen;
++ CK_BYTE_PTR pPassword;
++- CK_ULONG ulPublicDataLen;
+++ CK_ULONG ulPublicDataLen;
++ CK_BYTE_PTR pPublicData;
++- CK_ULONG ulPAndGLen;
++- CK_ULONG ulQLen;
++- CK_ULONG ulRandomLen;
+++ CK_ULONG ulPAndGLen;
+++ CK_ULONG ulQLen;
+++ CK_ULONG ulRandomLen;
++ CK_BYTE_PTR pRandomA;
++ CK_BYTE_PTR pPrimeP;
++ CK_BYTE_PTR pBaseG;
++ CK_BYTE_PTR pSubprimeQ;
++ } CK_SKIPJACK_PRIVATE_WRAP_PARAMS;
++
++-typedef CK_SKIPJACK_PRIVATE_WRAP_PARAMS CK_PTR \
++- CK_SKIPJACK_PRIVATE_WRAP_PARAMS_PTR;
++-
+++typedef CK_SKIPJACK_PRIVATE_WRAP_PARAMS CK_PTR
+++ CK_SKIPJACK_PRIVATE_WRAP_PTR;
++
++ /* CK_SKIPJACK_RELAYX_PARAMS provides the parameters to the
++- * CKM_SKIPJACK_RELAYX mechanism
++- */
+++ * CKM_SKIPJACK_RELAYX mechanism */
+++/* CK_SKIPJACK_RELAYX_PARAMS is new for v2.0 */
++ typedef struct CK_SKIPJACK_RELAYX_PARAMS {
++- CK_ULONG ulOldWrappedXLen;
+++ CK_ULONG ulOldWrappedXLen;
++ CK_BYTE_PTR pOldWrappedX;
++- CK_ULONG ulOldPasswordLen; +++ CK_ULONG ulOldPasswordLen; ++ CK_BYTE_PTR pOldPassword; ++- CK_ULONG ulOldPublicDataLen; +++ CK_ULONG ulOldPublicDataLen; ++ CK_BYTE_PTR pOldPublicData; ++- CK_ULONG ulOldRandomLen; +++ CK_ULONG ulOldRandomLen; ++ CK_BYTE_PTR pOldRandomA; ++- CK_ULONG ulNewPasswordLen; +++ CK_ULONG ulNewPasswordLen; ++ CK_BYTE_PTR pNewPassword; ++- CK_ULONG ulNewPublicDataLen; +++ CK_ULONG ulNewPublicDataLen; ++ CK_BYTE_PTR pNewPublicData; ++- CK_ULONG ulNewRandomLen; +++ CK_ULONG ulNewRandomLen; ++ CK_BYTE_PTR pNewRandomA; ++ } CK_SKIPJACK_RELAYX_PARAMS; ++ ++-typedef CK_SKIPJACK_RELAYX_PARAMS CK_PTR \ ++- CK_SKIPJACK_RELAYX_PARAMS_PTR; +++typedef CK_SKIPJACK_RELAYX_PARAMS CK_PTR +++ CK_SKIPJACK_RELAYX_PARAMS_PTR; +++ +++/* New for v2.40, CAMELLIA, ARIA, SEED */ +++typedef struct CK_CAMELLIA_CTR_PARAMS { +++ CK_ULONG ulCounterBits; +++ CK_BYTE cb[16]; +++} CK_CAMELLIA_CTR_PARAMS; +++ +++typedef CK_CAMELLIA_CTR_PARAMS CK_PTR CK_CAMELLIA_CTR_PARAMS_PTR; ++ +++typedef struct CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS { +++ CK_BYTE iv[16]; +++ CK_BYTE_PTR pData; +++ CK_ULONG length; +++} CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS; +++ +++typedef CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS CK_PTR +++ CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS_PTR; +++ +++typedef struct CK_ARIA_CBC_ENCRYPT_DATA_PARAMS { +++ CK_BYTE iv[16]; +++ CK_BYTE_PTR pData; +++ CK_ULONG length; +++} CK_ARIA_CBC_ENCRYPT_DATA_PARAMS; +++ +++typedef CK_ARIA_CBC_ENCRYPT_DATA_PARAMS CK_PTR +++ CK_ARIA_CBC_ENCRYPT_DATA_PARAMS_PTR; +++ +++typedef struct CK_SEED_CBC_ENCRYPT_DATA_PARAMS { +++ CK_BYTE iv[16]; +++ CK_BYTE_PTR pData; +++ CK_ULONG length; +++} CK_SEED_CBC_ENCRYPT_DATA_PARAMS; +++ +++typedef CK_SEED_CBC_ENCRYPT_DATA_PARAMS CK_PTR +++ CK_SEED_CBC_ENCRYPT_DATA_PARAMS_PTR; +++ +++/* ChaCha20/Salsa20 Counter support is new in v3.0*/ +++typedef struct CK_CHACHA20_PARAMS { +++ CK_BYTE_PTR pBlockCounter; +++ CK_ULONG blockCounterBits; +++ CK_BYTE_PTR pNonce; +++ CK_ULONG ulNonceBits; +++} CK_CHACHA20_PARAMS; +++ +++typedef CK_CHACHA20_PARAMS CK_PTR CK_CHACHA20_PARAMS_PTR; +++ +++typedef struct CK_SALSA20_PARAMS { +++ CK_BYTE_PTR pBlockCounter; +++ CK_BYTE_PTR pNonce; +++ CK_ULONG ulNonceBits; +++} CK_SALSA20_PARAMS; +++typedef CK_SALSA20_PARAMS CK_PTR CK_SALSA20_PARAMS_PTR; ++ ++ typedef struct CK_PBE_PARAMS { ++- CK_BYTE_PTR pInitVector; +++ CK_BYTE_PTR pInitVector; ++ CK_UTF8CHAR_PTR pPassword; ++- CK_ULONG ulPasswordLen; ++- CK_BYTE_PTR pSalt; ++- CK_ULONG ulSaltLen; ++- CK_ULONG ulIteration; +++ CK_ULONG ulPasswordLen; +++ CK_BYTE_PTR pSalt; +++ CK_ULONG ulSaltLen; +++ CK_ULONG ulIteration; ++ } CK_PBE_PARAMS; ++ ++ typedef CK_PBE_PARAMS CK_PTR CK_PBE_PARAMS_PTR; ++ ++- ++ /* CK_KEY_WRAP_SET_OAEP_PARAMS provides the parameters to the ++- * CKM_KEY_WRAP_SET_OAEP mechanism ++- */ +++ * CKM_KEY_WRAP_SET_OAEP mechanism */ +++/* CK_KEY_WRAP_SET_OAEP_PARAMS is new for v2.0 */ ++ typedef struct CK_KEY_WRAP_SET_OAEP_PARAMS { ++- CK_BYTE bBC; /* block contents byte */ ++- CK_BYTE_PTR pX; /* extra data */ ++- CK_ULONG ulXLen; /* length of extra data in bytes */ +++ CK_BYTE bBC; /* block contents byte */ +++ CK_BYTE_PTR pX; /* extra data */ +++ CK_ULONG ulXLen; /* length of extra data in bytes */ ++ } CK_KEY_WRAP_SET_OAEP_PARAMS; ++ ++-typedef CK_KEY_WRAP_SET_OAEP_PARAMS CK_PTR CK_KEY_WRAP_SET_OAEP_PARAMS_PTR; +++typedef CK_KEY_WRAP_SET_OAEP_PARAMS CK_PTR +++ CK_KEY_WRAP_SET_OAEP_PARAMS_PTR; ++ ++ typedef struct CK_SSL3_RANDOM_DATA { ++ CK_BYTE_PTR pClientRandom; ++- CK_ULONG ulClientRandomLen; +++ CK_ULONG ulClientRandomLen; ++ CK_BYTE_PTR 
pServerRandom; ++- CK_ULONG ulServerRandomLen; +++ CK_ULONG ulServerRandomLen; ++ } CK_SSL3_RANDOM_DATA; ++ ++- ++ typedef struct CK_SSL3_MASTER_KEY_DERIVE_PARAMS { ++ CK_SSL3_RANDOM_DATA RandomInfo; ++- CK_VERSION_PTR pVersion; +++ CK_VERSION_PTR pVersion; ++ } CK_SSL3_MASTER_KEY_DERIVE_PARAMS; ++ ++-typedef struct CK_SSL3_MASTER_KEY_DERIVE_PARAMS CK_PTR \ ++- CK_SSL3_MASTER_KEY_DERIVE_PARAMS_PTR; +++typedef struct CK_SSL3_MASTER_KEY_DERIVE_PARAMS CK_PTR +++ CK_SSL3_MASTER_KEY_DERIVE_PARAMS_PTR; ++ ++ typedef struct CK_SSL3_KEY_MAT_OUT { ++ CK_OBJECT_HANDLE hClientMacSecret; ++ CK_OBJECT_HANDLE hServerMacSecret; ++ CK_OBJECT_HANDLE hClientKey; ++ CK_OBJECT_HANDLE hServerKey; ++- CK_BYTE_PTR pIVClient; ++- CK_BYTE_PTR pIVServer; +++ CK_BYTE_PTR pIVClient; +++ CK_BYTE_PTR pIVServer; ++ } CK_SSL3_KEY_MAT_OUT; ++ ++ typedef CK_SSL3_KEY_MAT_OUT CK_PTR CK_SSL3_KEY_MAT_OUT_PTR; ++ ++- ++ typedef struct CK_SSL3_KEY_MAT_PARAMS { ++- CK_ULONG ulMacSizeInBits; ++- CK_ULONG ulKeySizeInBits; ++- CK_ULONG ulIVSizeInBits; ++- CK_BBOOL bIsExport; ++- CK_SSL3_RANDOM_DATA RandomInfo; +++ CK_ULONG ulMacSizeInBits; +++ CK_ULONG ulKeySizeInBits; +++ CK_ULONG ulIVSizeInBits; +++ CK_BBOOL bIsExport; /* Unused. Must be set to CK_FALSE. */ +++ CK_SSL3_RANDOM_DATA RandomInfo; ++ CK_SSL3_KEY_MAT_OUT_PTR pReturnedKeyMaterial; ++ } CK_SSL3_KEY_MAT_PARAMS; ++ ++ typedef CK_SSL3_KEY_MAT_PARAMS CK_PTR CK_SSL3_KEY_MAT_PARAMS_PTR; ++ +++/* CK_TLS_PRF_PARAMS is new for version 2.20 */ ++ typedef struct CK_TLS_PRF_PARAMS { ++- CK_BYTE_PTR pSeed; ++- CK_ULONG ulSeedLen; ++- CK_BYTE_PTR pLabel; ++- CK_ULONG ulLabelLen; ++- CK_BYTE_PTR pOutput; +++ CK_BYTE_PTR pSeed; +++ CK_ULONG ulSeedLen; +++ CK_BYTE_PTR pLabel; +++ CK_ULONG ulLabelLen; +++ CK_BYTE_PTR pOutput; ++ CK_ULONG_PTR pulOutputLen; ++ } CK_TLS_PRF_PARAMS; ++ ++ typedef CK_TLS_PRF_PARAMS CK_PTR CK_TLS_PRF_PARAMS_PTR; ++ +++/* TLS 1.2 is new for version 2.40 */ +++typedef struct CK_TLS12_MASTER_KEY_DERIVE_PARAMS { +++ CK_SSL3_RANDOM_DATA RandomInfo; +++ CK_VERSION_PTR pVersion; +++ CK_MECHANISM_TYPE prfHashMechanism; +++} CK_TLS12_MASTER_KEY_DERIVE_PARAMS; +++ +++typedef CK_TLS12_MASTER_KEY_DERIVE_PARAMS CK_PTR +++ CK_TLS12_MASTER_KEY_DERIVE_PARAMS_PTR; +++ +++typedef struct CK_TLS12_KEY_MAT_PARAMS { +++ CK_ULONG ulMacSizeInBits; +++ CK_ULONG ulKeySizeInBits; +++ CK_ULONG ulIVSizeInBits; +++ CK_BBOOL bIsExport; /* Unused. Must be set to CK_FALSE. 
*/ +++ CK_SSL3_RANDOM_DATA RandomInfo; +++ CK_SSL3_KEY_MAT_OUT_PTR pReturnedKeyMaterial; +++ CK_MECHANISM_TYPE prfHashMechanism; +++} CK_TLS12_KEY_MAT_PARAMS; +++ +++typedef CK_TLS12_KEY_MAT_PARAMS CK_PTR CK_TLS12_KEY_MAT_PARAMS_PTR; +++ +++typedef struct CK_TLS_KDF_PARAMS { +++ CK_MECHANISM_TYPE prfMechanism; +++ CK_BYTE_PTR pLabel; +++ CK_ULONG ulLabelLength; +++ CK_SSL3_RANDOM_DATA RandomInfo; +++ CK_BYTE_PTR pContextData; +++ CK_ULONG ulContextDataLength; +++} CK_TLS_KDF_PARAMS; +++ +++typedef struct CK_TLS_MAC_PARAMS { +++ CK_MECHANISM_TYPE prfHashMechanism; +++ CK_ULONG ulMacLength; +++ CK_ULONG ulServerOrClient; +++} CK_TLS_MAC_PARAMS; +++ +++typedef CK_TLS_MAC_PARAMS CK_PTR CK_TLS_MAC_PARAMS_PTR; +++ +++/* HKDF is new for v3.0 */ +++typedef struct CK_HKDF_PARAMS { +++ CK_BBOOL bExtract; +++ CK_BBOOL bExpand; +++ CK_MECHANISM_TYPE prfHashMechanism; +++ CK_ULONG ulSaltType; +++ CK_BYTE_PTR pSalt; +++ CK_ULONG ulSaltLen; +++ CK_OBJECT_HANDLE hSaltKey; +++ CK_BYTE_PTR pInfo; +++ CK_ULONG ulInfoLen; +++} CK_HKDF_PARAMS; +++typedef CK_HKDF_PARAMS CK_PTR CK_HKDF_PARAMS_PTR; +++ +++#define CKF_HKDF_SALT_NULL 0x00000001UL +++#define CKF_HKDF_SALT_DATA 0x00000002UL +++#define CKF_HKDF_SALT_KEY 0x00000004UL +++ +++/* WTLS is new for version 2.20 */ ++ typedef struct CK_WTLS_RANDOM_DATA { ++ CK_BYTE_PTR pClientRandom; ++- CK_ULONG ulClientRandomLen; +++ CK_ULONG ulClientRandomLen; ++ CK_BYTE_PTR pServerRandom; ++- CK_ULONG ulServerRandomLen; +++ CK_ULONG ulServerRandomLen; ++ } CK_WTLS_RANDOM_DATA; ++ ++ typedef CK_WTLS_RANDOM_DATA CK_PTR CK_WTLS_RANDOM_DATA_PTR; ++ ++ typedef struct CK_WTLS_MASTER_KEY_DERIVE_PARAMS { ++- CK_MECHANISM_TYPE DigestMechanism; +++ CK_MECHANISM_TYPE DigestMechanism; ++ CK_WTLS_RANDOM_DATA RandomInfo; ++- CK_BYTE_PTR pVersion; +++ CK_BYTE_PTR pVersion; ++ } CK_WTLS_MASTER_KEY_DERIVE_PARAMS; ++ ++-typedef CK_WTLS_MASTER_KEY_DERIVE_PARAMS CK_PTR \ ++- CK_WTLS_MASTER_KEY_DERIVE_PARAMS_PTR; +++typedef CK_WTLS_MASTER_KEY_DERIVE_PARAMS CK_PTR +++ CK_WTLS_MASTER_KEY_DERIVE_PARAMS_PTR; ++ ++ typedef struct CK_WTLS_PRF_PARAMS { ++ CK_MECHANISM_TYPE DigestMechanism; ++- CK_BYTE_PTR pSeed; ++- CK_ULONG ulSeedLen; ++- CK_BYTE_PTR pLabel; ++- CK_ULONG ulLabelLen; ++- CK_BYTE_PTR pOutput; ++- CK_ULONG_PTR pulOutputLen; +++ CK_BYTE_PTR pSeed; +++ CK_ULONG ulSeedLen; +++ CK_BYTE_PTR pLabel; +++ CK_ULONG ulLabelLen; +++ CK_BYTE_PTR pOutput; +++ CK_ULONG_PTR pulOutputLen; ++ } CK_WTLS_PRF_PARAMS; ++ ++ typedef CK_WTLS_PRF_PARAMS CK_PTR CK_WTLS_PRF_PARAMS_PTR; ++@@ -1864,581 +2215,377 @@ typedef CK_WTLS_PRF_PARAMS CK_PTR CK_WTL ++ typedef struct CK_WTLS_KEY_MAT_OUT { ++ CK_OBJECT_HANDLE hMacSecret; ++ CK_OBJECT_HANDLE hKey; ++- CK_BYTE_PTR pIV; +++ CK_BYTE_PTR pIV; ++ } CK_WTLS_KEY_MAT_OUT; ++ ++ typedef CK_WTLS_KEY_MAT_OUT CK_PTR CK_WTLS_KEY_MAT_OUT_PTR; ++ ++ typedef struct CK_WTLS_KEY_MAT_PARAMS { ++- CK_MECHANISM_TYPE DigestMechanism; ++- CK_ULONG ulMacSizeInBits; ++- CK_ULONG ulKeySizeInBits; ++- CK_ULONG ulIVSizeInBits; ++- CK_ULONG ulSequenceNumber; ++- CK_BBOOL bIsExport; ++- CK_WTLS_RANDOM_DATA RandomInfo; +++ CK_MECHANISM_TYPE DigestMechanism; +++ CK_ULONG ulMacSizeInBits; +++ CK_ULONG ulKeySizeInBits; +++ CK_ULONG ulIVSizeInBits; +++ CK_ULONG ulSequenceNumber; +++ CK_BBOOL bIsExport; /* Unused. Must be set to CK_FALSE. 
*/
+++ CK_WTLS_RANDOM_DATA RandomInfo;
++ CK_WTLS_KEY_MAT_OUT_PTR pReturnedKeyMaterial;
++ } CK_WTLS_KEY_MAT_PARAMS;
++
++ typedef CK_WTLS_KEY_MAT_PARAMS CK_PTR CK_WTLS_KEY_MAT_PARAMS_PTR;
++
+++/* The following types for NIST 800-108 KBKDF are defined in PKCS#11 v3.0 */
+++typedef CK_MECHANISM_TYPE CK_SP800_108_PRF_TYPE;
+++typedef CK_ULONG CK_PRF_DATA_TYPE;
+++
+++#define CK_SP800_108_ITERATION_VARIABLE 0x00000001UL
+++#define CK_SP800_108_OPTIONAL_COUNTER 0x00000002UL
+++#define CK_SP800_108_DKM_LENGTH 0x00000003UL
+++#define CK_SP800_108_BYTE_ARRAY 0x00000004UL
+++
+++/* ERRATA: PKCS#11 v3.0 Cryptographic Token Interface Current Mechanisms
+++ * specification specifies a CK_SP800_108_COUNTER, while the pkcs11t.h from
+++ * PKCS#11 v3.0 Cryptographic Token Interface Base Specification specifies
+++ * CK_SP800_108_OPTIONAL_COUNTER. */
+++#define CK_SP800_108_COUNTER CK_SP800_108_OPTIONAL_COUNTER
+++
+++typedef struct CK_PRF_DATA_PARAM {
+++ CK_PRF_DATA_TYPE type;
+++ CK_VOID_PTR pValue;
+++ CK_ULONG ulValueLen;
+++} CK_PRF_DATA_PARAM;
+++
+++typedef CK_PRF_DATA_PARAM CK_PTR CK_PRF_DATA_PARAM_PTR;
+++
+++typedef struct CK_SP800_108_COUNTER_FORMAT {
+++ CK_BBOOL bLittleEndian;
+++ CK_ULONG ulWidthInBits;
+++} CK_SP800_108_COUNTER_FORMAT;
+++
+++typedef CK_SP800_108_COUNTER_FORMAT CK_PTR CK_SP800_108_COUNTER_FORMAT_PTR;
+++
+++typedef CK_ULONG CK_SP800_108_DKM_LENGTH_METHOD;
+++
+++/* ERRATA: PKCS#11 v3.0 Cryptographic Token Interface Current Mechanisms
+++ * defines that these constants exist, but doesn't specify values. pkcs11t.h
+++ * from PKCS#11 v3.0 Cryptographic Token Interface Base Specification doesn't
+++ * define these constants either. */
+++#define CK_SP800_108_DKM_LENGTH_SUM_OF_KEYS 0x00000001UL
+++#define CK_SP800_108_DKM_LENGTH_SUM_OF_SEGMENTS 0x00000002UL
+++
+++typedef struct CK_SP800_108_DKM_LENGTH_FORMAT {
+++ CK_SP800_108_DKM_LENGTH_METHOD dkmLengthMethod;
+++ CK_BBOOL bLittleEndian;
+++ CK_ULONG ulWidthInBits;
+++} CK_SP800_108_DKM_LENGTH_FORMAT;
+++
+++typedef CK_SP800_108_DKM_LENGTH_FORMAT CK_PTR CK_SP800_108_DKM_LENGTH_FORMAT_PTR;
+++
+++typedef struct CK_DERIVED_KEY {
+++ CK_ATTRIBUTE_PTR pTemplate;
+++ CK_ULONG ulAttributeCount;
+++ CK_OBJECT_HANDLE_PTR phKey;
+++} CK_DERIVED_KEY;
+++
+++typedef CK_DERIVED_KEY CK_PTR CK_DERIVED_KEY_PTR;
+++
+++/* UNFIXED ERRATA: NIST SP800-108 specifies that the implementer can decide the
+++ * number of bits to take from each PRF invocation. However, all three forms
+++ * of the PKCS#11 v3.0 implementation lack a bitwidth for the PRF and only
+++ * allow the full-width mechanism varieties. Additionally, outside of the
+++ * base key (used as the key to the PRF), there is no way to pass any
+++ * additional, PRF-mechanism specific data. */
+++
+++typedef struct CK_SP800_108_KDF_PARAMS {
+++ CK_SP800_108_PRF_TYPE prfType;
+++ CK_ULONG ulNumberOfDataParams;
+++ CK_PRF_DATA_PARAM_PTR pDataParams;
+++ CK_ULONG ulAdditionalDerivedKeys;
+++ /* ERRATA: in PKCS#11 v3.0, pAdditionalDerivedKeys is typed as
+++ * CK_DERIVED_KEY; it needs to be of type CK_DERIVED_KEY_PTR. */
+++ CK_DERIVED_KEY_PTR pAdditionalDerivedKeys;
+++} CK_SP800_108_KDF_PARAMS;
+++
+++typedef CK_SP800_108_KDF_PARAMS CK_PTR CK_SP800_108_KDF_PARAMS_PTR;
+++
+++typedef struct CK_SP800_108_FEEDBACK_KDF_PARAMS {
+++ CK_SP800_108_PRF_TYPE prfType;
+++ CK_ULONG ulNumberOfDataParams;
+++ CK_PRF_DATA_PARAM_PTR pDataParams;
+++ CK_ULONG ulIVLen;
+++ CK_BYTE_PTR pIV;
+++ CK_ULONG ulAdditionalDerivedKeys;
+++ /* ERRATA: in PKCS#11 v3.0, pAdditionalDerivedKeys is typed as
+++ * CK_DERIVED_KEY; it needs to be of type CK_DERIVED_KEY_PTR. */
+++ CK_DERIVED_KEY_PTR pAdditionalDerivedKeys;
+++} CK_SP800_108_FEEDBACK_KDF_PARAMS;
+++
+++typedef CK_SP800_108_FEEDBACK_KDF_PARAMS CK_PTR CK_SP800_108_FEEDBACK_KDF_PARAMS_PTR;
+++
+++/* CMS is new for version 2.20 */
++ typedef struct CK_CMS_SIG_PARAMS {
++ CK_OBJECT_HANDLE certificateHandle;
++ CK_MECHANISM_PTR pSigningMechanism;
++ CK_MECHANISM_PTR pDigestMechanism;
++- CK_UTF8CHAR_PTR pContentType;
++- CK_BYTE_PTR pRequestedAttributes;
++- CK_ULONG ulRequestedAttributesLen;
++- CK_BYTE_PTR pRequiredAttributes;
++- CK_ULONG ulRequiredAttributesLen;
+++ CK_UTF8CHAR_PTR pContentType;
+++ CK_BYTE_PTR pRequestedAttributes;
+++ CK_ULONG ulRequestedAttributesLen;
+++ CK_BYTE_PTR pRequiredAttributes;
+++ CK_ULONG ulRequiredAttributesLen;
++ } CK_CMS_SIG_PARAMS;
++
++ typedef CK_CMS_SIG_PARAMS CK_PTR CK_CMS_SIG_PARAMS_PTR;
++
++ typedef struct CK_KEY_DERIVATION_STRING_DATA {
++ CK_BYTE_PTR pData;
++- CK_ULONG ulLen;
+++ CK_ULONG ulLen;
++ } CK_KEY_DERIVATION_STRING_DATA;
++
++-typedef CK_KEY_DERIVATION_STRING_DATA CK_PTR \
++- CK_KEY_DERIVATION_STRING_DATA_PTR;
++-
+++typedef CK_KEY_DERIVATION_STRING_DATA CK_PTR
+++ CK_KEY_DERIVATION_STRING_DATA_PTR;
++
++ /* The CK_EXTRACT_PARAMS is used for the
++ * CKM_EXTRACT_KEY_FROM_KEY mechanism. It specifies which bit
++ * of the base key should be used as the first bit of the
++- * derived key
++- */
+++ * derived key */
+++/* CK_EXTRACT_PARAMS is new for v2.0 */
++ typedef CK_ULONG CK_EXTRACT_PARAMS;
++
++ typedef CK_EXTRACT_PARAMS CK_PTR CK_EXTRACT_PARAMS_PTR;
++
++-/* CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE is used to
+++/* CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE is new for v2.10.
+++ * CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE is used to
++ * indicate the Pseudo-Random Function (PRF) used to generate
++- * key bits using PKCS #5 PBKDF2.
++- */
+++ * key bits using PKCS #5 PBKDF2. */
++ typedef CK_ULONG CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE;
++
++-typedef CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE CK_PTR \
++- CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE_PTR;
+++typedef CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE CK_PTR CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE_PTR;
++
++-#define CKP_PKCS5_PBKD2_HMAC_SHA1 0x00000001UL
++-#define CKP_PKCS5_PBKD2_HMAC_GOSTR3411 0x00000002UL
++-#define CKP_PKCS5_PBKD2_HMAC_SHA224 0x00000003UL
++-#define CKP_PKCS5_PBKD2_HMAC_SHA256 0x00000004UL
++-#define CKP_PKCS5_PBKD2_HMAC_SHA384 0x00000005UL
++-#define CKP_PKCS5_PBKD2_HMAC_SHA512 0x00000006UL
++-#define CKP_PKCS5_PBKD2_HMAC_SHA512_224 0x00000007UL
++-#define CKP_PKCS5_PBKD2_HMAC_SHA512_256 0x00000008UL
+++/* The following PRFs are defined in PKCS #5 v2.1.
*/ +++#define CKP_PKCS5_PBKD2_HMAC_SHA1 0x00000001UL +++#define CKP_PKCS5_PBKD2_HMAC_GOSTR3411 0x00000002UL +++#define CKP_PKCS5_PBKD2_HMAC_SHA224 0x00000003UL +++#define CKP_PKCS5_PBKD2_HMAC_SHA256 0x00000004UL +++#define CKP_PKCS5_PBKD2_HMAC_SHA384 0x00000005UL +++#define CKP_PKCS5_PBKD2_HMAC_SHA512 0x00000006UL +++#define CKP_PKCS5_PBKD2_HMAC_SHA512_224 0x00000007UL +++#define CKP_PKCS5_PBKD2_HMAC_SHA512_256 0x00000008UL ++ ++-/* CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE is used to indicate the +++/* CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE is new for v2.10. +++ * CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE is used to indicate the ++ * source of the salt value when deriving a key using PKCS #5 ++- * PBKDF2. ++- */ +++ * PBKDF2. */ ++ typedef CK_ULONG CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE; ++ ++-typedef CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE CK_PTR \ ++- CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE_PTR; +++typedef CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE CK_PTR CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE_PTR; ++ ++ /* The following salt value sources are defined in PKCS #5 v2.0. */ ++-#define CKZ_SALT_SPECIFIED 0x00000001UL +++#define CKZ_SALT_SPECIFIED 0x00000001UL ++ ++-/* CK_PKCS5_PBKD2_PARAMS is a structure that provides the ++- * parameters to the CKM_PKCS5_PBKD2 mechanism. ++- */ +++/* CK_PKCS5_PBKD2_PARAMS is new for v2.10. +++ * CK_PKCS5_PBKD2_PARAMS is a structure that provides the +++ * parameters to the CKM_PKCS5_PBKD2 mechanism. */ +++/* this structure is kept for compatibility. use _PARAMS2. */ ++ typedef struct CK_PKCS5_PBKD2_PARAMS { ++- CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource; ++- CK_VOID_PTR pSaltSourceData; ++- CK_ULONG ulSaltSourceDataLen; ++- CK_ULONG iterations; +++ CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource; +++ CK_VOID_PTR pSaltSourceData; +++ CK_ULONG ulSaltSourceDataLen; +++ CK_ULONG iterations; ++ CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE prf; ++- CK_VOID_PTR pPrfData; ++- CK_ULONG ulPrfDataLen; ++- CK_UTF8CHAR_PTR pPassword; ++- CK_ULONG_PTR ulPasswordLen; +++ CK_VOID_PTR pPrfData; +++ CK_ULONG ulPrfDataLen; +++ CK_UTF8CHAR_PTR pPassword; +++ CK_ULONG_PTR ulPasswordLen; ++ } CK_PKCS5_PBKD2_PARAMS; ++ ++ typedef CK_PKCS5_PBKD2_PARAMS CK_PTR CK_PKCS5_PBKD2_PARAMS_PTR; ++ ++-/* CK_PKCS5_PBKD2_PARAMS2 is a corrected version of the CK_PKCS5_PBKD2_PARAMS ++- * structure that provides the parameters to the CKM_PKCS5_PBKD2 mechanism ++- * noting that the ulPasswordLen field is a CK_ULONG and not a CK_ULONG_PTR. 
++- */ ++ typedef struct CK_PKCS5_PBKD2_PARAMS2 { ++- CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource; ++- CK_VOID_PTR pSaltSourceData; ++- CK_ULONG ulSaltSourceDataLen; ++- CK_ULONG iterations; +++ CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource; +++ CK_VOID_PTR pSaltSourceData; +++ CK_ULONG ulSaltSourceDataLen; +++ CK_ULONG iterations; ++ CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE prf; ++- CK_VOID_PTR pPrfData; ++- CK_ULONG ulPrfDataLen; ++- CK_UTF8CHAR_PTR pPassword; ++- CK_ULONG ulPasswordLen; +++ CK_VOID_PTR pPrfData; +++ CK_ULONG ulPrfDataLen; +++ CK_UTF8CHAR_PTR pPassword; +++ CK_ULONG ulPasswordLen; ++ } CK_PKCS5_PBKD2_PARAMS2; ++ ++ typedef CK_PKCS5_PBKD2_PARAMS2 CK_PTR CK_PKCS5_PBKD2_PARAMS2_PTR; ++ +++/* OTP is new in v2.40 */ ++ typedef CK_ULONG CK_OTP_PARAM_TYPE; ++-typedef CK_OTP_PARAM_TYPE CK_PARAM_TYPE; /* backward compatibility */ +++#define CK_OTP_VALUE 0UL +++#define CK_OTP_PIN 1UL +++#define CK_OTP_CHALLENGE 2UL +++#define CK_OTP_TIME 3UL +++#define CK_OTP_COUNTER 4UL +++#define CK_OTP_FLAGS 5UL +++#define CK_OTP_OUTPUT_LENGTH 6UL +++#define CK_OTP_OUTPUT_FORMAT 7UL ++ ++ typedef struct CK_OTP_PARAM { ++ CK_OTP_PARAM_TYPE type; ++- CK_VOID_PTR pValue; ++- CK_ULONG ulValueLen; +++ CK_VOID_PTR pValue; +++ CK_ULONG ulValueLen; ++ } CK_OTP_PARAM; ++ ++ typedef CK_OTP_PARAM CK_PTR CK_OTP_PARAM_PTR; ++ ++ typedef struct CK_OTP_PARAMS { ++ CK_OTP_PARAM_PTR pParams; ++- CK_ULONG ulCount; +++ CK_ULONG ulCount; ++ } CK_OTP_PARAMS; ++ ++ typedef CK_OTP_PARAMS CK_PTR CK_OTP_PARAMS_PTR; ++ ++ typedef struct CK_OTP_SIGNATURE_INFO { ++ CK_OTP_PARAM_PTR pParams; ++- CK_ULONG ulCount; +++ CK_ULONG ulCount; ++ } CK_OTP_SIGNATURE_INFO; ++ ++ typedef CK_OTP_SIGNATURE_INFO CK_PTR CK_OTP_SIGNATURE_INFO_PTR; ++ ++-#define CK_OTP_VALUE 0UL ++-#define CK_OTP_PIN 1UL ++-#define CK_OTP_CHALLENGE 2UL ++-#define CK_OTP_TIME 3UL ++-#define CK_OTP_COUNTER 4UL ++-#define CK_OTP_FLAGS 5UL ++-#define CK_OTP_OUTPUT_LENGTH 6UL ++-#define CK_OTP_OUTPUT_FORMAT 7UL ++- ++-#define CKF_NEXT_OTP 0x00000001UL ++-#define CKF_EXCLUDE_TIME 0x00000002UL ++-#define CKF_EXCLUDE_COUNTER 0x00000004UL +++#define CKF_NEXT_OTP 0x00000001UL +++#define CKF_EXCLUDE_TIME 0x00000002UL +++#define CKF_EXCLUDE_COUNTER 0x00000004UL ++ #define CKF_EXCLUDE_CHALLENGE 0x00000008UL ++-#define CKF_EXCLUDE_PIN 0x00000010UL +++#define CKF_EXCLUDE_PIN 0x00000010UL ++ #define CKF_USER_FRIENDLY_OTP 0x00000020UL ++ +++/* KIP is new in v2.40 */ ++ typedef struct CK_KIP_PARAMS { ++ CK_MECHANISM_PTR pMechanism; ++ CK_OBJECT_HANDLE hKey; ++- CK_BYTE_PTR pSeed; ++- CK_ULONG ulSeedLen; +++ CK_BYTE_PTR pSeed; +++ CK_ULONG ulSeedLen; ++ } CK_KIP_PARAMS; ++ ++ typedef CK_KIP_PARAMS CK_PTR CK_KIP_PARAMS_PTR; ++ ++-typedef struct CK_AES_CTR_PARAMS { ++- CK_ULONG ulCounterBits; ++- CK_BYTE cb[16]; ++-} CK_AES_CTR_PARAMS; ++- ++-typedef CK_AES_CTR_PARAMS CK_PTR CK_AES_CTR_PARAMS_PTR; ++- ++-typedef struct CK_GCM_PARAMS { ++- CK_BYTE_PTR pIv; ++- CK_ULONG ulIvLen; ++- CK_ULONG ulIvBits; ++- CK_BYTE_PTR pAAD; ++- CK_ULONG ulAADLen; ++- CK_ULONG ulTagBits; ++-} CK_GCM_PARAMS; ++- ++-typedef CK_GCM_PARAMS CK_PTR CK_GCM_PARAMS_PTR; ++- ++-typedef CK_ULONG CK_GENERATOR_FUNCTION; ++-#define CKG_NO_GENERATE 0x00000000UL ++-#define CKG_GENERATE 0x00000001UL ++-#define CKG_GENERATE_COUNTER 0x00000002UL ++-#define CKG_GENERATE_RANDOM 0x00000003UL ++- ++-typedef struct CK_GCM_MESSAGE_PARAMS { ++- CK_BYTE_PTR pIv; ++- CK_ULONG ulIvLen; ++- CK_ULONG ulIvFixedBits; ++- CK_GENERATOR_FUNCTION ivGenerator; ++- CK_BYTE_PTR pTag; ++- CK_ULONG ulTagBits; ++-} CK_GCM_MESSAGE_PARAMS; 
++- ++-typedef CK_GCM_MESSAGE_PARAMS CK_GCM_MESSAGE_PARAMS_PTR; ++- ++-typedef struct CK_CCM_PARAMS { ++- CK_ULONG ulDataLen; ++- CK_BYTE_PTR pNonce; ++- CK_ULONG ulNonceLen; ++- CK_BYTE_PTR pAAD; ++- CK_ULONG ulAADLen; ++- CK_ULONG ulMACLen; ++-} CK_CCM_PARAMS; ++- ++-typedef CK_CCM_PARAMS CK_PTR CK_CCM_PARAMS_PTR; ++- ++-typedef struct CK_CCM_MESSAGE_PARAMS { ++- CK_ULONG ulDataLen; /*plaintext or ciphertext*/ ++- CK_BYTE_PTR pNonce; ++- CK_ULONG ulNonceLen; ++- CK_ULONG ulNonceFixedBits; ++- CK_GENERATOR_FUNCTION nonceGenerator; ++- CK_BYTE_PTR pMAC; ++- CK_ULONG ulMACLen; ++-} CK_CCM_MESSAGE_PARAMS; ++- ++-typedef CK_CCM_MESSAGE_PARAMS CK_CCM_MESSAGE_PARAMS_PTR; ++- ++-/* Deprecated. Use CK_GCM_PARAMS */ ++-typedef struct CK_AES_GCM_PARAMS { ++- CK_BYTE_PTR pIv; ++- CK_ULONG ulIvLen; ++- CK_ULONG ulIvBits; ++- CK_BYTE_PTR pAAD; ++- CK_ULONG ulAADLen; ++- CK_ULONG ulTagBits; ++-} CK_AES_GCM_PARAMS; ++- ++-typedef CK_AES_GCM_PARAMS CK_PTR CK_AES_GCM_PARAMS_PTR; ++- ++-/* Deprecated. Use CK_CCM_PARAMS */ ++-typedef struct CK_AES_CCM_PARAMS { ++- CK_ULONG ulDataLen; ++- CK_BYTE_PTR pNonce; ++- CK_ULONG ulNonceLen; ++- CK_BYTE_PTR pAAD; ++- CK_ULONG ulAADLen; ++- CK_ULONG ulMACLen; ++-} CK_AES_CCM_PARAMS; ++- ++-typedef CK_AES_CCM_PARAMS CK_PTR CK_AES_CCM_PARAMS_PTR; ++- ++-typedef struct CK_CAMELLIA_CTR_PARAMS { ++- CK_ULONG ulCounterBits; ++- CK_BYTE cb[16]; ++-} CK_CAMELLIA_CTR_PARAMS; ++- ++-typedef CK_CAMELLIA_CTR_PARAMS CK_PTR CK_CAMELLIA_CTR_PARAMS_PTR; ++- ++-typedef struct CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS { ++- CK_BYTE iv[16]; ++- CK_BYTE_PTR pData; ++- CK_ULONG length; ++-} CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS; ++- ++-typedef CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS CK_PTR \ ++- CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS_PTR; ++- ++-typedef struct CK_ARIA_CBC_ENCRYPT_DATA_PARAMS { ++- CK_BYTE iv[16]; ++- CK_BYTE_PTR pData; ++- CK_ULONG length; ++-} CK_ARIA_CBC_ENCRYPT_DATA_PARAMS; ++- ++-typedef CK_ARIA_CBC_ENCRYPT_DATA_PARAMS CK_PTR \ ++- CK_ARIA_CBC_ENCRYPT_DATA_PARAMS_PTR; ++- +++/* DSA Param Gen is new for v2.40 */ ++ typedef struct CK_DSA_PARAMETER_GEN_PARAM { ++ CK_MECHANISM_TYPE hash; ++- CK_BYTE_PTR pSeed; ++- CK_ULONG ulSeedLen; ++- CK_ULONG ulIndex; +++ CK_BYTE_PTR pSeed; +++ CK_ULONG ulSeedLen; +++ CK_ULONG ulIndex; ++ } CK_DSA_PARAMETER_GEN_PARAM; ++ ++ typedef CK_DSA_PARAMETER_GEN_PARAM CK_PTR CK_DSA_PARAMETER_GEN_PARAM_PTR; ++ +++/* XXXX_AES_KEY_WRAP is new for v2.40 */ ++ typedef struct CK_ECDH_AES_KEY_WRAP_PARAMS { ++- CK_ULONG ulAESKeyBits; +++ CK_ULONG ulAESKeyBits; ++ CK_EC_KDF_TYPE kdf; ++- CK_ULONG ulSharedDataLen; ++- CK_BYTE_PTR pSharedData; +++ CK_ULONG ulSharedDataLen; +++ CK_BYTE_PTR pSharedData; ++ } CK_ECDH_AES_KEY_WRAP_PARAMS; ++ ++ typedef CK_ECDH_AES_KEY_WRAP_PARAMS CK_PTR CK_ECDH_AES_KEY_WRAP_PARAMS_PTR; ++ ++-typedef CK_ULONG CK_JAVA_MIDP_SECURITY_DOMAIN; ++- ++-typedef CK_ULONG CK_CERTIFICATE_CATEGORY; ++- ++ typedef struct CK_RSA_AES_KEY_WRAP_PARAMS { ++- CK_ULONG ulAESKeyBits; +++ CK_ULONG ulAESKeyBits; ++ CK_RSA_PKCS_OAEP_PARAMS_PTR pOAEPParams; ++ } CK_RSA_AES_KEY_WRAP_PARAMS; ++ ++ typedef CK_RSA_AES_KEY_WRAP_PARAMS CK_PTR CK_RSA_AES_KEY_WRAP_PARAMS_PTR; ++ ++-typedef struct CK_TLS12_MASTER_KEY_DERIVE_PARAMS { ++- CK_SSL3_RANDOM_DATA RandomInfo; ++- CK_VERSION_PTR pVersion; ++- CK_MECHANISM_TYPE prfHashMechanism; ++-} CK_TLS12_MASTER_KEY_DERIVE_PARAMS; ++- ++-typedef CK_TLS12_MASTER_KEY_DERIVE_PARAMS CK_PTR \ ++- CK_TLS12_MASTER_KEY_DERIVE_PARAMS_PTR; ++- ++-typedef struct CK_TLS12_KEY_MAT_PARAMS { ++- CK_ULONG ulMacSizeInBits; ++- CK_ULONG 
ulKeySizeInBits; ++- CK_ULONG ulIVSizeInBits; ++- CK_BBOOL bIsExport; ++- CK_SSL3_RANDOM_DATA RandomInfo; ++- CK_SSL3_KEY_MAT_OUT_PTR pReturnedKeyMaterial; ++- CK_MECHANISM_TYPE prfHashMechanism; ++-} CK_TLS12_KEY_MAT_PARAMS; ++- ++-typedef CK_TLS12_KEY_MAT_PARAMS CK_PTR CK_TLS12_KEY_MAT_PARAMS_PTR; ++- ++-typedef struct CK_TLS_KDF_PARAMS { ++- CK_MECHANISM_TYPE prfMechanism; ++- CK_BYTE_PTR pLabel; ++- CK_ULONG ulLabelLength; ++- CK_SSL3_RANDOM_DATA RandomInfo; ++- CK_BYTE_PTR pContextData; ++- CK_ULONG ulContextDataLength; ++-} CK_TLS_KDF_PARAMS; ++- ++-typedef CK_TLS_KDF_PARAMS CK_PTR CK_TLS_KDF_PARAMS_PTR; ++- ++-typedef struct CK_TLS_MAC_PARAMS { ++- CK_MECHANISM_TYPE prfHashMechanism; ++- CK_ULONG ulMacLength; ++- CK_ULONG ulServerOrClient; ++-} CK_TLS_MAC_PARAMS; ++- ++-typedef CK_TLS_MAC_PARAMS CK_PTR CK_TLS_MAC_PARAMS_PTR; ++- +++/* GOSTR3410 is new for v2.40 */ ++ typedef struct CK_GOSTR3410_DERIVE_PARAMS { ++ CK_EC_KDF_TYPE kdf; ++- CK_BYTE_PTR pPublicData; ++- CK_ULONG ulPublicDataLen; ++- CK_BYTE_PTR pUKM; ++- CK_ULONG ulUKMLen; +++ CK_BYTE_PTR pPublicData; +++ CK_ULONG ulPublicDataLen; +++ CK_BYTE_PTR pUKM; +++ CK_ULONG ulUKMLen; ++ } CK_GOSTR3410_DERIVE_PARAMS; ++ ++ typedef CK_GOSTR3410_DERIVE_PARAMS CK_PTR CK_GOSTR3410_DERIVE_PARAMS_PTR; ++ ++ typedef struct CK_GOSTR3410_KEY_WRAP_PARAMS { ++- CK_BYTE_PTR pWrapOID; ++- CK_ULONG ulWrapOIDLen; ++- CK_BYTE_PTR pUKM; ++- CK_ULONG ulUKMLen; +++ CK_BYTE_PTR pWrapOID; +++ CK_ULONG ulWrapOIDLen; +++ CK_BYTE_PTR pUKM; +++ CK_ULONG ulUKMLen; ++ CK_OBJECT_HANDLE hKey; ++ } CK_GOSTR3410_KEY_WRAP_PARAMS; ++ ++ typedef CK_GOSTR3410_KEY_WRAP_PARAMS CK_PTR CK_GOSTR3410_KEY_WRAP_PARAMS_PTR; ++ ++-typedef struct CK_SEED_CBC_ENCRYPT_DATA_PARAMS { ++- CK_BYTE iv[16]; ++- CK_BYTE_PTR pData; ++- CK_ULONG length; ++-} CK_SEED_CBC_ENCRYPT_DATA_PARAMS; ++- ++-typedef CK_SEED_CBC_ENCRYPT_DATA_PARAMS CK_PTR \ ++- CK_SEED_CBC_ENCRYPT_DATA_PARAMS_PTR; ++- ++-/* ++- * New PKCS 11 v3.0 data structures. 
++- */ ++- ++-typedef CK_ULONG CK_PROFILE_ID; ++-typedef CK_PROFILE_ID CK_PTR CK_PROFILE_ID_PTR; ++- ++-/* Typedefs for Flexible KDF */ ++-typedef CK_ULONG CK_PRF_DATA_TYPE; ++-typedef CK_MECHANISM_TYPE CK_SP800_108_PRF_TYPE; ++-#define CK_SP800_108_ITERATION_VARIABLE 0x00000001UL ++-#define CK_SP800_108_OPTIONAL_COUNTER 0x00000002UL ++-#define CK_SP800_108_DKM_LENGTH 0x00000003UL ++-#define CK_SP800_108_BYTE_ARRAY 0x00000004UL ++-#define CK_SP800_108_COUNTER CK_SP800_108_OPTIONAL_COUNTER ++- ++-typedef struct CK_PRF_DATA_PARAM ++-{ ++- CK_PRF_DATA_TYPE type; ++- CK_VOID_PTR pValue; ++- CK_ULONG ulValueLen; ++-} CK_PRF_DATA_PARAM; ++- ++-typedef CK_PRF_DATA_PARAM CK_PTR CK_PRF_DATA_PARAM_PTR; ++- ++- ++-typedef struct CK_SP800_108_COUNTER_FORMAT ++-{ ++- CK_BBOOL bLittleEndian; ++- CK_ULONG ulWidthInBits; ++-} CK_SP800_108_COUNTER_FORMAT; ++- ++-typedef CK_SP800_108_COUNTER_FORMAT CK_PTR CK_SP800_108_COUNTER_FORMAT_PTR; ++- ++-typedef CK_ULONG CK_SP800_108_DKM_LENGTH_METHOD; ++-#define CK_SP800_108_DKM_LENGTH_SUM_OF_KEYS 0x00000001UL ++-#define CK_SP800_108_DKM_LENGTH_SUM_OF_SEGMENTS 0x00000002UL ++- ++-typedef struct CK_SP800_108_DKM_LENGTH_FORMAT ++-{ ++- CK_SP800_108_DKM_LENGTH_METHOD dkmLengthMethod; ++- CK_BBOOL bLittleEndian; ++- CK_ULONG ulWidthInBits; ++-} CK_SP800_108_DKM_LENGTH_FORMAT; ++- ++-typedef CK_SP800_108_DKM_LENGTH_FORMAT \ ++- CK_PTR CK_SP800_108_DKM_LENGTH_FORMAT_PTR; ++- ++-typedef struct CK_DERIVED_KEY ++-{ ++- CK_ATTRIBUTE_PTR pTemplate; ++- CK_ULONG ulAttributeCount; ++- CK_OBJECT_HANDLE_PTR phKey; ++-} CK_DERIVED_KEY; ++- ++-typedef CK_DERIVED_KEY CK_PTR CK_DERIVED_KEY_PTR; ++- ++-typedef struct CK_SP800_108_KDF_PARAMS ++-{ ++- CK_SP800_108_PRF_TYPE prfType; ++- CK_ULONG ulNumberOfDataParams; ++- CK_PRF_DATA_PARAM_PTR pDataParams; ++- CK_ULONG ulAdditionalDerivedKeys; ++- CK_DERIVED_KEY_PTR pAdditionalDerivedKeys; ++-} CK_SP800_108_KDF_PARAMS; ++- ++-typedef CK_SP800_108_KDF_PARAMS CK_PTR CK_SP800_108_KDF_PARAMS_PTR; ++- ++-typedef struct CK_SP800_108_FEEDBACK_KDF_PARAMS ++-{ ++- CK_SP800_108_PRF_TYPE prfType; ++- CK_ULONG ulNumberOfDataParams; ++- CK_PRF_DATA_PARAM_PTR pDataParams; ++- CK_ULONG ulIVLen; ++- CK_BYTE_PTR pIV; ++- CK_ULONG ulAdditionalDerivedKeys; ++- CK_DERIVED_KEY_PTR pAdditionalDerivedKeys; ++-} CK_SP800_108_FEEDBACK_KDF_PARAMS; ++- ++-typedef CK_SP800_108_FEEDBACK_KDF_PARAMS \ ++- CK_PTR CK_SP800_108_FEEDBACK_KDF_PARAMS_PTR; ++- ++-/* EDDSA */ +++/* EDDSA and XEDDSA are new for v3.0 */ ++ typedef struct CK_EDDSA_PARAMS { ++- CK_BBOOL phFlag; ++- CK_ULONG ulContextDataLen; +++ CK_BBOOL phFlag; +++ CK_ULONG ulContextDataLen; ++ CK_BYTE_PTR pContextData; ++ } CK_EDDSA_PARAMS; +++typedef CK_ULONG CK_XEDDSA_HASH_TYPE; +++typedef CK_XEDDSA_HASH_TYPE CK_PTR CK_XEDDSA_HASH_TYPE_PTR; ++ ++-typedef CK_EDDSA_PARAMS CK_PTR CK_EDDSA_PARAMS_PTR; ++- ++-/* Extended ChaCha20/Salsa20 support*/ ++-typedef struct CK_CHACHA20_PARAMS { ++- CK_BYTE_PTR pBlockCounter; ++- CK_ULONG blockCounterBits; ++- CK_BYTE_PTR pNonce; ++- CK_ULONG ulNonceBits; ++-} CK_CHACHA20_PARAMS; ++- ++-typedef CK_CHACHA20_PARAMS CK_PTR CK_CHACHA20_PARAMS_PTR; ++- ++-typedef struct CK_SALSA20_PARAMS { ++- CK_BYTE_PTR pBlockCounter; ++- CK_BYTE_PTR pNonce; ++- CK_ULONG ulNonceBits; ++-} CK_SALSA20_PARAMS; ++- ++-typedef CK_SALSA20_PARAMS CK_PTR CK_SALSA20_PARAMS_PTR; ++- ++-typedef struct CK_SALSA20_CHACHA20_POLY1305_PARAMS { ++- CK_BYTE_PTR pNonce; ++- CK_ULONG ulNonceLen; ++- CK_BYTE_PTR pAAD; ++- CK_ULONG ulAADLen; ++-} CK_SALSA20_CHACHA20_POLY1305_PARAMS; ++- ++-typedef 
CK_SALSA20_CHACHA20_POLY1305_PARAMS \ ++- CK_PTR CK_SALSA20_CHACHA20_POLY1305_PARAMS_PTR; ++- ++-typedef struct CK_SALSA20_CHACHA20_POLY1305_MSG_PARAMS { ++- CK_BYTE_PTR pNonce; ++- CK_ULONG ulNonceLen; ++- CK_BYTE_PTR pTag; ++-} CK_SALSA20_CHACHA20_POLY1305_MSG_PARAMS; ++- ++-typedef CK_SALSA20_CHACHA20_POLY1305_MSG_PARAMS \ ++- CK_PTR CK_SALSA20_CHACHA20_POLY1305_MSG_PARAMS_PTR; +++typedef struct CK_XEDDSA_PARAMS { +++ CK_XEDDSA_HASH_TYPE hash; +++} CK_XEDDSA_PARAMS; +++typedef CK_XEDDSA_PARAMS CK_PTR CK_XEDDSA_PARAMS_PTR; ++ +++/* X3DH and Ratchet are new in v3.0 */ ++ typedef CK_ULONG CK_X3DH_KDF_TYPE; ++ typedef CK_X3DH_KDF_TYPE CK_PTR CK_X3DH_KDF_TYPE_PTR; ++ ++-/* X3dh, ratchet */ ++ typedef struct CK_X3DH_INITIATE_PARAMS { ++ CK_X3DH_KDF_TYPE kdf; ++ CK_OBJECT_HANDLE pPeer_identity; ++ CK_OBJECT_HANDLE pPeer_prekey; ++- CK_BYTE_PTR pPrekey_signature; ++- CK_BYTE_PTR pOnetime_key; +++ CK_BYTE_PTR pPrekey_signature; +++ CK_BYTE_PTR pOnetime_key; ++ CK_OBJECT_HANDLE pOwn_identity; ++ CK_OBJECT_HANDLE pOwn_ephemeral; ++ } CK_X3DH_INITIATE_PARAMS; ++ ++ typedef struct CK_X3DH_RESPOND_PARAMS { ++ CK_X3DH_KDF_TYPE kdf; ++- CK_BYTE_PTR pIdentity_id; ++- CK_BYTE_PTR pPrekey_id; ++- CK_BYTE_PTR pOnetime_id; +++ CK_BYTE_PTR pIdentity_id; +++ CK_BYTE_PTR pPrekey_id; +++ CK_BYTE_PTR pOnetime_id; ++ CK_OBJECT_HANDLE pInitiator_identity; ++- CK_BYTE_PTR pInitiator_ephemeral; +++ CK_BYTE_PTR pInitiator_ephemeral; ++ } CK_X3DH_RESPOND_PARAMS; ++ ++ typedef CK_ULONG CK_X2RATCHET_KDF_TYPE; ++ typedef CK_X2RATCHET_KDF_TYPE CK_PTR CK_X2RATCHET_KDF_TYPE_PTR; ++ ++ typedef struct CK_X2RATCHET_INITIALIZE_PARAMS { ++- CK_BYTE_PTR sk; ++- CK_OBJECT_HANDLE peer_public_prekey; ++- CK_OBJECT_HANDLE peer_public_identity; ++- CK_OBJECT_HANDLE own_public_identity; ++- CK_BBOOL bEncryptedHeader; ++- CK_ULONG eCurve; ++- CK_MECHANISM_TYPE aeadMechanism; +++ CK_BYTE_PTR sk; +++ CK_OBJECT_HANDLE peer_public_prekey; +++ CK_OBJECT_HANDLE peer_public_identity; +++ CK_OBJECT_HANDLE own_public_identity; +++ CK_BBOOL bEncryptedHeader; +++ CK_ULONG eCurve; +++ CK_MECHANISM_TYPE aeadMechanism; ++ CK_X2RATCHET_KDF_TYPE kdfMechanism; ++ } CK_X2RATCHET_INITIALIZE_PARAMS; ++ ++-typedef CK_X2RATCHET_INITIALIZE_PARAMS \ ++- CK_PTR CK_X2RATCHET_INITIALIZE_PARAMS_PTR; +++typedef CK_X2RATCHET_INITIALIZE_PARAMS +++ CK_PTR CK_X2RATCHET_INITIALIZE_PARAMS_PTR; ++ ++ typedef struct CK_X2RATCHET_RESPOND_PARAMS { ++- CK_BYTE_PTR sk; ++- CK_OBJECT_HANDLE own_prekey; ++- CK_OBJECT_HANDLE initiator_identity; ++- CK_OBJECT_HANDLE own_public_identity; ++- CK_BBOOL bEncryptedHeader; ++- CK_ULONG eCurve; ++- CK_MECHANISM_TYPE aeadMechanism; +++ CK_BYTE_PTR sk; +++ CK_OBJECT_HANDLE own_prekey; +++ CK_OBJECT_HANDLE initiator_identity; +++ CK_OBJECT_HANDLE own_public_identity; +++ CK_BBOOL bEncryptedHeader; +++ CK_ULONG eCurve; +++ CK_MECHANISM_TYPE aeadMechanism; ++ CK_X2RATCHET_KDF_TYPE kdfMechanism; ++ } CK_X2RATCHET_RESPOND_PARAMS; ++-typedef CK_X2RATCHET_RESPOND_PARAMS \ ++- CK_PTR CK_X2RATCHET_RESPOND_PARAMS_PTR; +++typedef CK_X2RATCHET_RESPOND_PARAMS +++ CK_PTR CK_X2RATCHET_RESPOND_PARAMS_PTR; ++ ++-typedef CK_ULONG CK_XEDDSA_HASH_TYPE; ++-typedef CK_XEDDSA_HASH_TYPE CK_PTR CK_XEDDSA_HASH_TYPE_PTR; +++/* NSS Specific defines */ +++/* stuff that for historic reasons is in this header file but should have +++ * been in pkcs11n.h */ +++#define CKK_INVALID_KEY_TYPE 0xffffffffUL ++ ++-/* XEDDSA */ ++-typedef struct CK_XEDDSA_PARAMS { ++- CK_XEDDSA_HASH_TYPE hash; ++-} CK_XEDDSA_PARAMS; ++-typedef CK_XEDDSA_PARAMS CK_PTR CK_XEDDSA_PARAMS_PTR; 
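The two PBKD2 parameter structures kept earlier in this hunk differ only in the type of ulPasswordLen: the legacy CK_PKCS5_PBKD2_PARAMS declares it CK_ULONG_PTR, while the corrected CK_PKCS5_PBKD2_PARAMS2 declares it a plain CK_ULONG. A minimal caller-side sketch of the corrected form follows; it is not part of the patch, and it assumes the standard C_GenerateKey entry point from pkcs11.h plus caller-supplied salt and key-template arguments:

/* Sketch only (not part of this patch): generate a secret key with
 * CKM_PKCS5_PBKD2 using the corrected _PARAMS2 layout. The session,
 * salt buffer, and attribute template are supplied by the caller. */
static CK_RV pbkd2_generate(CK_SESSION_HANDLE hSession,
                            CK_BYTE *salt, CK_ULONG saltLen,
                            CK_ATTRIBUTE *attrs, CK_ULONG nattrs,
                            CK_OBJECT_HANDLE *phKey)
{
    CK_PKCS5_PBKD2_PARAMS2 params = {0};
    CK_MECHANISM mech = {0};

    params.saltSource          = CKZ_SALT_SPECIFIED;
    params.pSaltSourceData     = salt;
    params.ulSaltSourceDataLen = saltLen;
    params.iterations          = 600000;
    params.prf                 = CKP_PKCS5_PBKD2_HMAC_SHA256;
    params.pPassword           = (CK_UTF8CHAR_PTR)"correct horse";
    params.ulPasswordLen       = 13; /* plain CK_ULONG; the legacy struct wants CK_ULONG_PTR */

    mech.mechanism      = CKM_PKCS5_PBKD2;
    mech.pParameter     = &params;
    mech.ulParameterLen = sizeof(params);

    return C_GenerateKey(hSession, &mech, attrs, nattrs, phKey);
}

With the legacy structure the password length would have to live in a separate CK_ULONG whose address is stored in the struct, which is exactly the wart that _PARAMS2 removes.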
+++#include "pkcs11n.h" ++ ++-typedef struct CK_HKDF_PARAMS { ++- CK_BBOOL bExtract; ++- CK_BBOOL bExpand; ++- CK_MECHANISM_TYPE prfHashMechanism; ++- CK_ULONG ulSaltType; ++- CK_BYTE_PTR pSalt; ++- CK_ULONG ulSaltLen; ++- CK_OBJECT_HANDLE hSaltKey; ++- CK_BYTE_PTR pInfo; ++- CK_ULONG ulInfoLen; ++-} CK_HKDF_PARAMS; ++-typedef CK_HKDF_PARAMS CK_PTR CK_HKDF_PARAMS_PTR; ++- ++-#define CKF_HKDF_SALT_NULL 0x00000001UL ++-#define CKF_HKDF_SALT_DATA 0x00000002UL ++-#define CKF_HKDF_SALT_KEY 0x00000004UL ++- ++-#endif /* _PKCS11T_H_ */ +++/* undo packing */ +++#include "pkcs11u.h" ++ +++#endif ++--- /dev/null +++++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11u.h ++@@ -0,0 +1,22 @@ +++/* This Source Code Form is subject to the terms of the Mozilla Public +++ * License, v. 2.0. If a copy of the MPL was not distributed with this +++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +++/* +++ * Copyright (C) 1994-1999 RSA Security Inc. Licence to copy this document +++ * is granted provided that it is identified as "RSA Security Inc. Public-Key +++ * Cryptography Standards (PKCS)" in all material mentioning or referencing +++ * this document. +++ */ +++/* +++ * reset any packing set by pkcs11p.h +++ */ +++ +++#if defined(_WIN32) || defined(_WINDOWS) +++#ifdef __clang__ +++#pragma clang diagnostic ignored "-Wpragma-pack" +++#endif +++#ifdef _MSC_VER +++#pragma warning(disable : 4103) +++#endif +++#pragma pack(pop, cryptoki) +++#endif ++--- /dev/null +++++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/prcpucfg.h ++@@ -0,0 +1,1319 @@ +++/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +++/* This Source Code Form is subject to the terms of the Mozilla Public +++ * License, v. 2.0. If a copy of the MPL was not distributed with this +++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +++ +++/* +++ * This file is used by not only Linux but also other glibc systems +++ * such as GNU/Hurd and GNU/k*BSD. 
+++ */ +++ +++#ifndef nspr_cpucfg___ +++#define nspr_cpucfg___ +++ +++#ifndef XP_UNIX +++#define XP_UNIX +++#endif +++ +++#if !defined(LINUX) && defined(__linux__) +++#define LINUX +++#endif +++ +++#ifdef __FreeBSD_kernel__ +++#define PR_AF_INET6 28 /* same as AF_INET6 */ +++#elif defined(__GNU__) +++#define PR_AF_INET6 26 /* same as AF_INET6 */ +++#else +++#define PR_AF_INET6 10 /* same as AF_INET6 */ +++#endif +++ +++#ifdef __powerpc64__ +++ +++#ifdef __LITTLE_ENDIAN__ +++#define IS_LITTLE_ENDIAN 1 +++#undef IS_BIG_ENDIAN +++#else +++#undef IS_LITTLE_ENDIAN +++#define IS_BIG_ENDIAN 1 +++#endif +++#define IS_64 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 8 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 8 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 64 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 64 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 6 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 6 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 8 +++#define PR_ALIGN_OF_INT64 8 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 8 +++#define PR_ALIGN_OF_POINTER 8 +++#define PR_ALIGN_OF_WORD 8 +++ +++#define PR_BYTES_PER_WORD_LOG2 3 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__powerpc__) +++ +++#ifdef __LITTLE_ENDIAN__ +++#define IS_LITTLE_ENDIAN 1 +++#undef IS_BIG_ENDIAN +++#else +++#undef IS_LITTLE_ENDIAN +++#define IS_BIG_ENDIAN 1 +++#endif +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 4 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 4 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 32 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 32 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 5 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 5 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 4 +++#define PR_ALIGN_OF_INT64 8 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 8 +++#define PR_ALIGN_OF_POINTER 4 +++#define PR_ALIGN_OF_WORD 4 +++ +++#define PR_BYTES_PER_WORD_LOG2 2 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__alpha) +++ +++#define IS_LITTLE_ENDIAN 1 +++#undef IS_BIG_ENDIAN +++#define IS_64 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 8 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 8 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define 
PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 64 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 64 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 6 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 6 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 8 +++#define PR_ALIGN_OF_INT64 8 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 8 +++#define PR_ALIGN_OF_POINTER 8 +++#define PR_ALIGN_OF_WORD 8 +++ +++#define PR_BYTES_PER_WORD_LOG2 3 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__ia64__) +++ +++#define IS_LITTLE_ENDIAN 1 +++#undef IS_BIG_ENDIAN +++#define IS_64 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 8 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 8 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 64 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 64 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 6 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 6 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 8 +++#define PR_ALIGN_OF_INT64 8 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 8 +++#define PR_ALIGN_OF_POINTER 8 +++#define PR_ALIGN_OF_WORD 8 +++ +++#define PR_BYTES_PER_WORD_LOG2 3 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__x86_64__) +++ +++#ifdef __ILP32__ +++ +++#define IS_LITTLE_ENDIAN 1 +++#undef IS_BIG_ENDIAN +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 4 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 4 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 32 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 32 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 5 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 5 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 4 +++#define PR_ALIGN_OF_INT64 8 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 8 +++#define PR_ALIGN_OF_POINTER 4 +++#define PR_ALIGN_OF_WORD 4 +++ +++#define PR_BYTES_PER_WORD_LOG2 2 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#else +++ +++#define IS_LITTLE_ENDIAN 1 +++#undef IS_BIG_ENDIAN +++#define IS_64 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 
+++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 8 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 8 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 64 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 64 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 6 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 6 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 8 +++#define PR_ALIGN_OF_INT64 8 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 8 +++#define PR_ALIGN_OF_POINTER 8 +++#define PR_ALIGN_OF_WORD 8 +++ +++#define PR_BYTES_PER_WORD_LOG2 3 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#endif +++ +++#elif defined(__mc68000__) +++ +++#undef IS_LITTLE_ENDIAN +++#define IS_BIG_ENDIAN 1 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 4 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 4 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 32 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 32 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 5 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 5 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 2 +++#define PR_ALIGN_OF_LONG 2 +++#define PR_ALIGN_OF_INT64 2 +++#define PR_ALIGN_OF_FLOAT 2 +++#define PR_ALIGN_OF_DOUBLE 2 +++#define PR_ALIGN_OF_POINTER 2 +++#define PR_ALIGN_OF_WORD 2 +++ +++#define PR_BYTES_PER_WORD_LOG2 2 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__sparc__) && defined (__arch64__) +++ +++#undef IS_LITTLE_ENDIAN +++#define IS_BIG_ENDIAN 1 +++#define IS_64 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 8 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 8 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 64 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 64 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 6 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 6 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_INT64 8 +++#define PR_ALIGN_OF_LONG 8 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 8 +++#define PR_ALIGN_OF_POINTER 8 +++#define 
PR_ALIGN_OF_WORD 8
+++
+++#define PR_BYTES_PER_WORD_LOG2 3
+++#define PR_BYTES_PER_DWORD_LOG2 3
+++
+++#elif defined(__sparc__)
+++
+++#undef IS_LITTLE_ENDIAN
+++#define IS_BIG_ENDIAN 1
+++
+++#define PR_BYTES_PER_BYTE 1
+++#define PR_BYTES_PER_SHORT 2
+++#define PR_BYTES_PER_INT 4
+++#define PR_BYTES_PER_INT64 8
+++#define PR_BYTES_PER_LONG 4
+++#define PR_BYTES_PER_FLOAT 4
+++#define PR_BYTES_PER_DOUBLE 8
+++#define PR_BYTES_PER_WORD 4
+++#define PR_BYTES_PER_DWORD 8
+++
+++#define PR_BITS_PER_BYTE 8
+++#define PR_BITS_PER_SHORT 16
+++#define PR_BITS_PER_INT 32
+++#define PR_BITS_PER_INT64 64
+++#define PR_BITS_PER_LONG 32
+++#define PR_BITS_PER_FLOAT 32
+++#define PR_BITS_PER_DOUBLE 64
+++#define PR_BITS_PER_WORD 32
+++
+++#define PR_BITS_PER_BYTE_LOG2 3
+++#define PR_BITS_PER_SHORT_LOG2 4
+++#define PR_BITS_PER_INT_LOG2 5
+++#define PR_BITS_PER_INT64_LOG2 6
+++#define PR_BITS_PER_LONG_LOG2 5
+++#define PR_BITS_PER_FLOAT_LOG2 5
+++#define PR_BITS_PER_DOUBLE_LOG2 6
+++#define PR_BITS_PER_WORD_LOG2 5
+++
+++#define PR_ALIGN_OF_SHORT 2
+++#define PR_ALIGN_OF_INT 4
+++#define PR_ALIGN_OF_LONG 4
+++#define PR_ALIGN_OF_INT64 8
+++#define PR_ALIGN_OF_FLOAT 4
+++#define PR_ALIGN_OF_DOUBLE 8
+++#define PR_ALIGN_OF_POINTER 4
+++#define PR_ALIGN_OF_WORD 4
+++
+++#define PR_BYTES_PER_WORD_LOG2 2
+++#define PR_BYTES_PER_DWORD_LOG2 3
+++
+++#elif defined(__i386__)
+++
+++#define IS_LITTLE_ENDIAN 1
+++#undef IS_BIG_ENDIAN
+++
+++#define PR_BYTES_PER_BYTE 1
+++#define PR_BYTES_PER_SHORT 2
+++#define PR_BYTES_PER_INT 4
+++#define PR_BYTES_PER_INT64 8
+++#define PR_BYTES_PER_LONG 4
+++#define PR_BYTES_PER_FLOAT 4
+++#define PR_BYTES_PER_DOUBLE 8
+++#define PR_BYTES_PER_WORD 4
+++#define PR_BYTES_PER_DWORD 8
+++
+++#define PR_BITS_PER_BYTE 8
+++#define PR_BITS_PER_SHORT 16
+++#define PR_BITS_PER_INT 32
+++#define PR_BITS_PER_INT64 64
+++#define PR_BITS_PER_LONG 32
+++#define PR_BITS_PER_FLOAT 32
+++#define PR_BITS_PER_DOUBLE 64
+++#define PR_BITS_PER_WORD 32
+++
+++#define PR_BITS_PER_BYTE_LOG2 3
+++#define PR_BITS_PER_SHORT_LOG2 4
+++#define PR_BITS_PER_INT_LOG2 5
+++#define PR_BITS_PER_INT64_LOG2 6
+++#define PR_BITS_PER_LONG_LOG2 5
+++#define PR_BITS_PER_FLOAT_LOG2 5
+++#define PR_BITS_PER_DOUBLE_LOG2 6
+++#define PR_BITS_PER_WORD_LOG2 5
+++
+++#define PR_ALIGN_OF_SHORT 2
+++#define PR_ALIGN_OF_INT 4
+++#define PR_ALIGN_OF_LONG 4
+++#define PR_ALIGN_OF_INT64 4
+++#define PR_ALIGN_OF_FLOAT 4
+++#define PR_ALIGN_OF_DOUBLE 4
+++#define PR_ALIGN_OF_POINTER 4
+++#define PR_ALIGN_OF_WORD 4
+++
+++#define PR_BYTES_PER_WORD_LOG2 2
+++#define PR_BYTES_PER_DWORD_LOG2 3
+++
+++#elif defined(__mips__)
+++
+++/* For _ABI64 */
+++#include <sgidefs.h>
+++
+++#ifdef __MIPSEB__
+++#define IS_BIG_ENDIAN 1
+++#undef IS_LITTLE_ENDIAN
+++#elif defined(__MIPSEL__)
+++#define IS_LITTLE_ENDIAN 1
+++#undef IS_BIG_ENDIAN
+++#else
+++#error "Unknown MIPS endianness."
+++#endif +++ +++#if _MIPS_SIM == _ABI64 +++ +++#define IS_64 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 8 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 8 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 64 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 64 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 6 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 6 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 8 +++#define PR_ALIGN_OF_INT64 8 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 8 +++#define PR_ALIGN_OF_POINTER 8 +++#define PR_ALIGN_OF_WORD 8 +++ +++#define PR_BYTES_PER_WORD_LOG2 3 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#else /* _ABI64 */ +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 4 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 4 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 32 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 32 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 5 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 5 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 4 +++#define PR_ALIGN_OF_INT64 8 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 8 +++#define PR_ALIGN_OF_POINTER 4 +++#define PR_ALIGN_OF_WORD 4 +++ +++#define PR_BYTES_PER_WORD_LOG2 2 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#endif /* _ABI64 */ +++ +++#elif defined(__arm__) +++ +++#ifdef __ARMEB__ +++#undef IS_LITTLE_ENDIAN +++#define IS_BIG_ENDIAN 1 +++#elif defined(__ARMEL__) +++#define IS_LITTLE_ENDIAN 1 +++#undef IS_BIG_ENDIAN +++#else +++#error "Unknown ARM endianness." 
+++#endif +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 4 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 4 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 32 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 32 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 5 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 5 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 4 +++#define PR_ALIGN_OF_INT64 4 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 4 +++#define PR_ALIGN_OF_POINTER 4 +++#define PR_ALIGN_OF_WORD 4 +++ +++#define PR_BYTES_PER_WORD_LOG2 2 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__aarch64__) +++ +++#ifdef __AARCH64EB__ +++#undef IS_LITTLE_ENDIAN +++#define IS_BIG_ENDIAN 1 +++#elif defined(__AARCH64EL__) +++#define IS_LITTLE_ENDIAN 1 +++#undef IS_BIG_ENDIAN +++#else +++#error "Unknown Aarch64 endianness." +++#endif +++#define IS_64 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 8 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 8 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 64 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 64 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 6 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 6 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 8 +++#define PR_ALIGN_OF_INT64 8 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 8 +++#define PR_ALIGN_OF_POINTER 8 +++#define PR_ALIGN_OF_WORD 8 +++ +++#define PR_BYTES_PER_WORD_LOG2 3 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__hppa__) +++ +++#undef IS_LITTLE_ENDIAN +++#define IS_BIG_ENDIAN 1 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 4 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 4 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 32 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 32 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 5 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 5 +++ +++#define 
PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 4 +++#define PR_ALIGN_OF_INT64 8 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 8 +++#define PR_ALIGN_OF_POINTER 4 +++#define PR_ALIGN_OF_WORD 4 +++ +++#define PR_BYTES_PER_WORD_LOG2 2 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__s390x__) +++ +++#define IS_BIG_ENDIAN 1 +++#undef IS_LITTLE_ENDIAN +++#define IS_64 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 8 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 8 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 64 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 64 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 6 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 6 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 8 +++#define PR_ALIGN_OF_INT64 8 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 8 +++#define PR_ALIGN_OF_POINTER 8 +++#define PR_ALIGN_OF_WORD 8 +++ +++#define PR_BYTES_PER_WORD_LOG2 3 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__s390__) +++ +++#define IS_BIG_ENDIAN 1 +++#undef IS_LITTLE_ENDIAN +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 4 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 4 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 32 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 32 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 5 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 5 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 4 +++#define PR_ALIGN_OF_INT64 4 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 4 +++#define PR_ALIGN_OF_POINTER 4 +++#define PR_ALIGN_OF_WORD 4 +++ +++#define PR_BYTES_PER_WORD_LOG2 2 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__sh__) +++ +++#define IS_LITTLE_ENDIAN 1 +++#undef IS_BIG_ENDIAN +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 4 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 4 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 32 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 32 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define 
PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 5 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 5 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 4 +++#define PR_ALIGN_OF_INT64 4 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 4 +++#define PR_ALIGN_OF_POINTER 4 +++#define PR_ALIGN_OF_WORD 4 +++ +++#define PR_BYTES_PER_WORD_LOG2 2 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__avr32__) +++ +++#undef IS_LITTLE_ENDIAN +++#define IS_BIG_ENDIAN 1 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 4 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 4 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 32 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 32 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 5 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 5 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 4 +++#define PR_ALIGN_OF_INT64 4 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 4 +++#define PR_ALIGN_OF_POINTER 4 +++#define PR_ALIGN_OF_WORD 4 +++ +++#define PR_BYTES_PER_WORD_LOG2 2 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__m32r__) +++ +++#undef IS_LITTLE_ENDIAN +++#define IS_BIG_ENDIAN 1 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 4 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 4 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 32 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 32 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 5 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 5 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 4 +++#define PR_ALIGN_OF_INT64 4 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 4 +++#define PR_ALIGN_OF_POINTER 4 +++#define PR_ALIGN_OF_WORD 4 +++ +++#define PR_BYTES_PER_WORD_LOG2 2 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__or1k__) +++ +++#undef IS_LITTLE_ENDIAN +++#define IS_BIG_ENDIAN 1 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 4 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 4 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define 
PR_BITS_PER_LONG 32 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 32 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 5 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 5 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 4 +++#define PR_ALIGN_OF_INT64 4 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 4 +++#define PR_ALIGN_OF_POINTER 4 +++#define PR_ALIGN_OF_WORD 4 +++ +++#define PR_BYTES_PER_WORD_LOG2 2 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__riscv) && (__riscv_xlen == 32) +++ +++#undef IS_BIG_ENDIAN +++#define IS_LITTLE_ENDIAN 1 +++#undef IS_64 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 4 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 4 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 32 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 32 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 5 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 5 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 4 +++#define PR_ALIGN_OF_INT64 8 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 8 +++#define PR_ALIGN_OF_POINTER 4 +++#define PR_ALIGN_OF_WORD 4 +++ +++#define PR_BYTES_PER_WORD_LOG2 2 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__riscv) && (__riscv_xlen == 64) +++ +++#undef IS_BIG_ENDIAN +++#define IS_LITTLE_ENDIAN 1 +++#define IS_64 +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 8 +++#define PR_BYTES_PER_FLOAT 4 +++#define PR_BYTES_PER_DOUBLE 8 +++#define PR_BYTES_PER_WORD 8 +++#define PR_BYTES_PER_DWORD 8 +++ +++#define PR_BITS_PER_BYTE 8 +++#define PR_BITS_PER_SHORT 16 +++#define PR_BITS_PER_INT 32 +++#define PR_BITS_PER_INT64 64 +++#define PR_BITS_PER_LONG 64 +++#define PR_BITS_PER_FLOAT 32 +++#define PR_BITS_PER_DOUBLE 64 +++#define PR_BITS_PER_WORD 64 +++ +++#define PR_BITS_PER_BYTE_LOG2 3 +++#define PR_BITS_PER_SHORT_LOG2 4 +++#define PR_BITS_PER_INT_LOG2 5 +++#define PR_BITS_PER_INT64_LOG2 6 +++#define PR_BITS_PER_LONG_LOG2 6 +++#define PR_BITS_PER_FLOAT_LOG2 5 +++#define PR_BITS_PER_DOUBLE_LOG2 6 +++#define PR_BITS_PER_WORD_LOG2 6 +++ +++#define PR_ALIGN_OF_SHORT 2 +++#define PR_ALIGN_OF_INT 4 +++#define PR_ALIGN_OF_LONG 8 +++#define PR_ALIGN_OF_INT64 8 +++#define PR_ALIGN_OF_FLOAT 4 +++#define PR_ALIGN_OF_DOUBLE 8 +++#define PR_ALIGN_OF_POINTER 8 +++#define PR_ALIGN_OF_WORD 8 +++ +++#define PR_BYTES_PER_WORD_LOG2 3 +++#define PR_BYTES_PER_DWORD_LOG2 3 +++ +++#elif defined(__arc__) +++ +++#define IS_LITTLE_ENDIAN 1 +++#undef IS_BIG_ENDIAN +++ +++#define PR_BYTES_PER_BYTE 1 +++#define PR_BYTES_PER_SHORT 2 +++#define PR_BYTES_PER_INT 4 +++#define PR_BYTES_PER_INT64 8 +++#define PR_BYTES_PER_LONG 4 
+++#define PR_BYTES_PER_FLOAT 4
+++#define PR_BYTES_PER_DOUBLE 8
+++#define PR_BYTES_PER_WORD 4
+++#define PR_BYTES_PER_DWORD 8
+++
+++#define PR_BITS_PER_BYTE 8
+++#define PR_BITS_PER_SHORT 16
+++#define PR_BITS_PER_INT 32
+++#define PR_BITS_PER_INT64 64
+++#define PR_BITS_PER_LONG 32
+++#define PR_BITS_PER_FLOAT 32
+++#define PR_BITS_PER_DOUBLE 64
+++#define PR_BITS_PER_WORD 32
+++
+++#define PR_BITS_PER_BYTE_LOG2 3
+++#define PR_BITS_PER_SHORT_LOG2 4
+++#define PR_BITS_PER_INT_LOG2 5
+++#define PR_BITS_PER_INT64_LOG2 6
+++#define PR_BITS_PER_LONG_LOG2 5
+++#define PR_BITS_PER_FLOAT_LOG2 5
+++#define PR_BITS_PER_DOUBLE_LOG2 6
+++#define PR_BITS_PER_WORD_LOG2 5
+++
+++#define PR_ALIGN_OF_SHORT 2
+++#define PR_ALIGN_OF_INT 4
+++#define PR_ALIGN_OF_LONG 4
+++#define PR_ALIGN_OF_INT64 4
+++#define PR_ALIGN_OF_FLOAT 4
+++#define PR_ALIGN_OF_DOUBLE 4
+++#define PR_ALIGN_OF_POINTER 4
+++#define PR_ALIGN_OF_WORD 4
+++
+++#define PR_BYTES_PER_WORD_LOG2 2
+++#define PR_BYTES_PER_DWORD_LOG2 3
+++
+++#elif defined(__nios2__) || defined(__microblaze__) || defined(__nds32__) || \
+++ defined(__xtensa__)
+++
+++#if defined(__microblaze__) && defined(__BIG_ENDIAN__)
+++#define IS_BIG_ENDIAN 1
+++#undef IS_LITTLE_ENDIAN
+++#else
+++#define IS_LITTLE_ENDIAN 1
+++#undef IS_BIG_ENDIAN
+++#endif
+++
+++#define PR_BYTES_PER_BYTE 1
+++#define PR_BYTES_PER_SHORT 2
+++#define PR_BYTES_PER_INT 4
+++#define PR_BYTES_PER_INT64 8
+++#define PR_BYTES_PER_LONG 4
+++#define PR_BYTES_PER_FLOAT 4
+++#define PR_BYTES_PER_DOUBLE 8
+++#define PR_BYTES_PER_WORD 4
+++#define PR_BYTES_PER_DWORD 8
+++
+++#define PR_BITS_PER_BYTE 8
+++#define PR_BITS_PER_SHORT 16
+++#define PR_BITS_PER_INT 32
+++#define PR_BITS_PER_INT64 64
+++#define PR_BITS_PER_LONG 32
+++#define PR_BITS_PER_FLOAT 32
+++#define PR_BITS_PER_DOUBLE 64
+++#define PR_BITS_PER_WORD 32
+++
+++#define PR_BITS_PER_BYTE_LOG2 3
+++#define PR_BITS_PER_SHORT_LOG2 4
+++#define PR_BITS_PER_INT_LOG2 5
+++#define PR_BITS_PER_INT64_LOG2 6
+++#define PR_BITS_PER_LONG_LOG2 5
+++#define PR_BITS_PER_FLOAT_LOG2 5
+++#define PR_BITS_PER_DOUBLE_LOG2 6
+++#define PR_BITS_PER_WORD_LOG2 5
+++
+++#define PR_ALIGN_OF_SHORT 2
+++#define PR_ALIGN_OF_INT 4
+++#define PR_ALIGN_OF_LONG 4
+++#define PR_ALIGN_OF_INT64 4
+++#define PR_ALIGN_OF_FLOAT 4
+++#define PR_ALIGN_OF_DOUBLE 4
+++#define PR_ALIGN_OF_POINTER 4
+++#define PR_ALIGN_OF_WORD 4
+++
+++#define PR_BYTES_PER_WORD_LOG2 2
+++#define PR_BYTES_PER_DWORD_LOG2 3
+++
+++#elif defined(__e2k__)
+++
+++#define IS_LITTLE_ENDIAN 1
+++#undef IS_BIG_ENDIAN
+++
+++#define IS_64
+++
+++#define PR_BYTES_PER_BYTE 1
+++#define PR_BYTES_PER_SHORT 2
+++#define PR_BYTES_PER_INT 4
+++#define PR_BYTES_PER_INT64 8
+++#define PR_BYTES_PER_LONG 8
+++#define PR_BYTES_PER_FLOAT 4
+++#define PR_BYTES_PER_DOUBLE 8
+++#define PR_BYTES_PER_WORD 8
+++#define PR_BYTES_PER_DWORD 8
+++
+++#define PR_BITS_PER_BYTE 8
+++#define PR_BITS_PER_SHORT 16
+++#define PR_BITS_PER_INT 32
+++#define PR_BITS_PER_INT64 64
+++#define PR_BITS_PER_LONG 64
+++#define PR_BITS_PER_FLOAT 32
+++#define PR_BITS_PER_DOUBLE 64
+++#define PR_BITS_PER_WORD 64
+++
+++#define PR_BITS_PER_BYTE_LOG2 3
+++#define PR_BITS_PER_SHORT_LOG2 4
+++#define PR_BITS_PER_INT_LOG2 5
+++#define PR_BITS_PER_INT64_LOG2 6
+++#define PR_BITS_PER_LONG_LOG2 6
+++#define PR_BITS_PER_FLOAT_LOG2 5
+++#define PR_BITS_PER_DOUBLE_LOG2 6
+++#define PR_BITS_PER_WORD_LOG2 6
+++
+++#define PR_ALIGN_OF_SHORT 2
+++#define PR_ALIGN_OF_INT 4
+++#define PR_ALIGN_OF_LONG 8
+++#define PR_ALIGN_OF_INT64 8
+++#define PR_ALIGN_OF_FLOAT 4
+++#define PR_ALIGN_OF_DOUBLE 8
+++#define PR_ALIGN_OF_POINTER 8
+++#define PR_ALIGN_OF_WORD 8
+++
+++#define PR_BYTES_PER_WORD_LOG2 3
+++#define PR_BYTES_PER_DWORD_LOG2 3
+++
+++#else
+++
+++#error "Unknown CPU architecture"
+++
+++#endif
+++
+++#ifndef HAVE_LONG_LONG
+++#define HAVE_LONG_LONG
+++#endif
+++#if PR_ALIGN_OF_DOUBLE == 8
+++#define HAVE_ALIGNED_DOUBLES
+++#endif
+++#if PR_ALIGN_OF_INT64 == 8
+++#define HAVE_ALIGNED_LONGLONGS
+++#endif
+++
+++#ifndef NO_NSPR_10_SUPPORT
+++
+++#define BYTES_PER_BYTE PR_BYTES_PER_BYTE
+++#define BYTES_PER_SHORT PR_BYTES_PER_SHORT
+++#define BYTES_PER_INT PR_BYTES_PER_INT
+++#define BYTES_PER_INT64 PR_BYTES_PER_INT64
+++#define BYTES_PER_LONG PR_BYTES_PER_LONG
+++#define BYTES_PER_FLOAT PR_BYTES_PER_FLOAT
+++#define BYTES_PER_DOUBLE PR_BYTES_PER_DOUBLE
+++#define BYTES_PER_WORD PR_BYTES_PER_WORD
+++#define BYTES_PER_DWORD PR_BYTES_PER_DWORD
+++
+++#define BITS_PER_BYTE PR_BITS_PER_BYTE
+++#define BITS_PER_SHORT PR_BITS_PER_SHORT
+++#define BITS_PER_INT PR_BITS_PER_INT
+++#define BITS_PER_INT64 PR_BITS_PER_INT64
+++#define BITS_PER_LONG PR_BITS_PER_LONG
+++#define BITS_PER_FLOAT PR_BITS_PER_FLOAT
+++#define BITS_PER_DOUBLE PR_BITS_PER_DOUBLE
+++#define BITS_PER_WORD PR_BITS_PER_WORD
+++
+++#define BITS_PER_BYTE_LOG2 PR_BITS_PER_BYTE_LOG2
+++#define BITS_PER_SHORT_LOG2 PR_BITS_PER_SHORT_LOG2
+++#define BITS_PER_INT_LOG2 PR_BITS_PER_INT_LOG2
+++#define BITS_PER_INT64_LOG2 PR_BITS_PER_INT64_LOG2
+++#define BITS_PER_LONG_LOG2 PR_BITS_PER_LONG_LOG2
+++#define BITS_PER_FLOAT_LOG2 PR_BITS_PER_FLOAT_LOG2
+++#define BITS_PER_DOUBLE_LOG2 PR_BITS_PER_DOUBLE_LOG2
+++#define BITS_PER_WORD_LOG2 PR_BITS_PER_WORD_LOG2
+++
+++#define ALIGN_OF_SHORT PR_ALIGN_OF_SHORT
+++#define ALIGN_OF_INT PR_ALIGN_OF_INT
+++#define ALIGN_OF_LONG PR_ALIGN_OF_LONG
+++#define ALIGN_OF_INT64 PR_ALIGN_OF_INT64
+++#define ALIGN_OF_FLOAT PR_ALIGN_OF_FLOAT
+++#define ALIGN_OF_DOUBLE PR_ALIGN_OF_DOUBLE
+++#define ALIGN_OF_POINTER PR_ALIGN_OF_POINTER
+++#define ALIGN_OF_WORD PR_ALIGN_OF_WORD
+++
+++#define BYTES_PER_WORD_LOG2 PR_BYTES_PER_WORD_LOG2
+++#define BYTES_PER_DWORD_LOG2 PR_BYTES_PER_DWORD_LOG2
+++#define WORDS_PER_DWORD_LOG2 PR_WORDS_PER_DWORD_LOG2
+++
+++#endif /* NO_NSPR_10_SUPPORT */
+++
+++#endif /* nspr_cpucfg___ */
++--- /dev/null
+++++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/prtypes.h
++@@ -0,0 +1,520 @@
+++/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+++/* This Source Code Form is subject to the terms of the Mozilla Public
+++ * License, v. 2.0. If a copy of the MPL was not distributed with this
+++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+++
+++/*
+++** File: prtypes.h
+++** Description: Definitions of NSPR's basic types
+++**
+++** Prototypes and macros used to make up for deficiencies that we have found
+++** in ANSI environments.
+++**
+++** Since we do not wrap <stdlib.h> and all the other standard headers, authors
+++** of portable code will not know in general that they need these definitions.
+++** Instead of requiring these authors to find the dependent uses in their code
+++** and take the following steps only in those C files, we take steps once here
+++** for all C files.
+++**/
+++
+++#ifndef prtypes_h___
+++#define prtypes_h___
+++
+++#ifdef MDCPUCFG
+++#include MDCPUCFG
+++#else
+++#include "prcpucfg.h"
+++#endif
+++
+++#include <stddef.h>
+++
+++/***********************************************************************
+++** MACROS: PR_EXTERN
+++** PR_IMPLEMENT
+++** DESCRIPTION:
+++** These are only for externally visible routines and globals. For
+++** internal routines, just use "extern" for type checking and that
+++** will not export internal cross-file or forward-declared symbols.
+++** Define a macro for declaring procedures return types. We use this to
+++** deal with windoze specific type hackery for DLL definitions. Use
+++** PR_EXTERN when the prototype for the method is declared. Use
+++** PR_IMPLEMENT for the implementation of the method.
+++**
+++** Example:
+++** in dowhim.h
+++** PR_EXTERN( void ) DoWhatIMean( void );
+++** in dowhim.c
+++** PR_IMPLEMENT( void ) DoWhatIMean( void ) { return; }
+++**
+++**
+++***********************************************************************/
+++#if defined(WIN32)
+++
+++#define PR_EXPORT(__type) extern __declspec(dllexport) __type
+++#define PR_EXPORT_DATA(__type) extern __declspec(dllexport) __type
+++#define PR_IMPORT(__type) __declspec(dllimport) __type
+++#define PR_IMPORT_DATA(__type) __declspec(dllimport) __type
+++
+++#define PR_EXTERN(__type) extern __declspec(dllexport) __type
+++#define PR_IMPLEMENT(__type) __declspec(dllexport) __type
+++#define PR_EXTERN_DATA(__type) extern __declspec(dllexport) __type
+++#define PR_IMPLEMENT_DATA(__type) __declspec(dllexport) __type
+++
+++#define PR_CALLBACK
+++#define PR_CALLBACK_DECL
+++#define PR_STATIC_CALLBACK(__x) static __x
+++
+++#elif defined(XP_OS2) && defined(__declspec)
+++
+++#define PR_EXPORT(__type) extern __declspec(dllexport) __type
+++#define PR_EXPORT_DATA(__type) extern __declspec(dllexport) __type
+++#define PR_IMPORT(__type) extern __declspec(dllimport) __type
+++#define PR_IMPORT_DATA(__type) extern __declspec(dllimport) __type
+++
+++#define PR_EXTERN(__type) extern __declspec(dllexport) __type
+++#define PR_IMPLEMENT(__type) __declspec(dllexport) __type
+++#define PR_EXTERN_DATA(__type) extern __declspec(dllexport) __type
+++#define PR_IMPLEMENT_DATA(__type) __declspec(dllexport) __type
+++
+++#define PR_CALLBACK
+++#define PR_CALLBACK_DECL
+++#define PR_STATIC_CALLBACK(__x) static __x
+++
+++#else /* Unix */
+++
+++/* GCC 3.3 and later support the visibility attribute.
*/
+++#if (__GNUC__ >= 4) || \
+++ (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
+++#define PR_VISIBILITY_DEFAULT __attribute__((visibility("default")))
+++#else
+++#define PR_VISIBILITY_DEFAULT
+++#endif
+++
+++#define PR_EXPORT(__type) extern PR_VISIBILITY_DEFAULT __type
+++#define PR_EXPORT_DATA(__type) extern PR_VISIBILITY_DEFAULT __type
+++#define PR_IMPORT(__type) extern PR_VISIBILITY_DEFAULT __type
+++#define PR_IMPORT_DATA(__type) extern PR_VISIBILITY_DEFAULT __type
+++
+++#define PR_EXTERN(__type) extern PR_VISIBILITY_DEFAULT __type
+++#define PR_IMPLEMENT(__type) PR_VISIBILITY_DEFAULT __type
+++#define PR_EXTERN_DATA(__type) extern PR_VISIBILITY_DEFAULT __type
+++#define PR_IMPLEMENT_DATA(__type) PR_VISIBILITY_DEFAULT __type
+++#define PR_CALLBACK
+++#define PR_CALLBACK_DECL
+++#define PR_STATIC_CALLBACK(__x) static __x
+++
+++#endif
+++
+++#if defined(_NSPR_BUILD_)
+++#define NSPR_API(__type) PR_EXPORT(__type)
+++#define NSPR_DATA_API(__type) PR_EXPORT_DATA(__type)
+++#else
+++#define NSPR_API(__type) PR_IMPORT(__type)
+++#define NSPR_DATA_API(__type) PR_IMPORT_DATA(__type)
+++#endif
+++
+++/***********************************************************************
+++** MACROS: PR_BEGIN_MACRO
+++** PR_END_MACRO
+++** DESCRIPTION:
+++** Macro body brackets so that macros with compound statement definitions
+++** behave syntactically more like functions when called.
+++***********************************************************************/
+++#define PR_BEGIN_MACRO do {
+++#define PR_END_MACRO } while (0)
+++
+++/***********************************************************************
+++** MACROS: PR_BEGIN_EXTERN_C
+++** PR_END_EXTERN_C
+++** DESCRIPTION:
+++** Macro shorthands for conditional C++ extern block delimiters.
+++***********************************************************************/
+++#ifdef __cplusplus
+++#define PR_BEGIN_EXTERN_C extern "C" {
+++#define PR_END_EXTERN_C }
+++#else
+++#define PR_BEGIN_EXTERN_C
+++#define PR_END_EXTERN_C
+++#endif
+++
+++/***********************************************************************
+++** MACROS: PR_BIT
+++** PR_BITMASK
+++** DESCRIPTION:
+++** Bit masking macros. XXX n must be <= 31 to be portable
+++***********************************************************************/
+++#define PR_BIT(n) ((PRUint32)1 << (n))
+++#define PR_BITMASK(n) (PR_BIT(n) - 1)
+++
+++/***********************************************************************
+++** MACROS: PR_ROUNDUP
+++** PR_MIN
+++** PR_MAX
+++** PR_ABS
+++** DESCRIPTION:
+++** Commonly used macros for operations on compatible types.
+++***********************************************************************/
+++#define PR_ROUNDUP(x,y) ((((x)+((y)-1))/(y))*(y))
+++#define PR_MIN(x,y) ((x)<(y)?(x):(y))
+++#define PR_MAX(x,y) ((x)>(y)?(x):(y))
+++#define PR_ABS(x) ((x)<0?-(x):(x))
+++
+++/***********************************************************************
+++** MACROS: PR_ARRAY_SIZE
+++** DESCRIPTION:
+++** The number of elements in an array.
+++***********************************************************************/
+++#define PR_ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
+++
+++PR_BEGIN_EXTERN_C
+++
+++/*
+++** Starting in NSPR 4.9.5, NSPR's exact-width integer types should match
+++** the exact-width integer types defined in <stdint.h>. This allows sloppy
+++** code to use PRInt{N} and int{N}_t interchangeably.
+++**
+++** The 8-bit and 16-bit integer types can only be defined using char and
+++** short. All platforms define the 32-bit integer types using int.
+++** So only the 64-bit integer types could be defined differently.
+++**
+++** NSPR's original strategy was to use the "shortest" 64-bit integer type:
+++** if long is 64-bit, then prefer it over long long. This strategy is also
+++** used by Linux/glibc, FreeBSD, and NetBSD.
+++**
+++** Other platforms use a different strategy: simply define the 64-bit
+++** integer types using long long. We define the PR_ALTERNATE_INT64_TYPEDEF
+++** macro on these platforms. Note that PR_ALTERNATE_INT64_TYPEDEF is for
+++** internal use by NSPR headers only. Do not define or test this macro in
+++** your code.
+++**
+++** NOTE: NSPR can't use <stdint.h> because C99 requires C++ code to define
+++** __STDC_LIMIT_MACROS and __STDC_CONSTANT_MACROS to make all the macros
+++** defined in <stdint.h> available. This strange requirement is gone in
+++** C11. When most platforms ignore this C99 requirement, NSPR will be able
+++** to use <stdint.h>. A patch to do that is in NSPR bug 634793.
+++*/
+++
+++#if defined(__APPLE__) || defined(__OpenBSD__)
+++#define PR_ALTERNATE_INT64_TYPEDEF
+++#endif
+++
+++/************************************************************************
+++** TYPES: PRUint8
+++** PRInt8
+++** DESCRIPTION:
+++** The int8 types are known to be 8 bits each. There is no type that
+++** is equivalent to a plain "char".
+++************************************************************************/
+++#if PR_BYTES_PER_BYTE == 1
+++typedef unsigned char PRUint8;
+++/*
+++** There are two scenarios that require us to define PRInt8 as type 'char'.
+++** (1)
+++** Some cfront-based C++ compilers do not like 'signed char' and
+++** issue the warning message:
+++** warning: "signed" not implemented (ignored)
+++** For these compilers, we have to define PRInt8 as plain 'char'.
+++** Make sure that plain 'char' is indeed signed under these compilers.
+++** (2)
+++** Mozilla C++ code expects the PRInt{N} and int{N}_t types to match (see bug
+++** 634793). If a platform defines int8_t as 'char', but NSPR defines it as
+++** 'signed char', it results in a type mismatch.
+++** On such platforms we define PRInt8 as 'char' to avoid the mismatch.
+++*/
+++#if (defined(HPUX) && defined(__cplusplus) /* reason 1 */ \
+++ && !defined(__GNUC__) && __cplusplus < 199707L) \
+++ || (defined(SCO) && defined(__cplusplus) /* reason 1 */ \
+++ && !defined(__GNUC__) && __cplusplus == 1L) \
+++ || (defined(__sun) && defined(__cplusplus)) /* reason 2 */
+++typedef char PRInt8;
+++#else
+++typedef signed char PRInt8;
+++#endif
+++#else
+++#error No suitable type for PRInt8/PRUint8
+++#endif
+++
+++/************************************************************************
+++ * MACROS: PR_INT8_MAX
+++ * PR_INT8_MIN
+++ * PR_UINT8_MAX
+++ * DESCRIPTION:
+++ * The maximum and minimum values of a PRInt8 or PRUint8.
+++************************************************************************/
+++
+++#define PR_INT8_MAX 127
+++#define PR_INT8_MIN (-128)
+++#define PR_UINT8_MAX 255U
+++
+++/************************************************************************
+++** TYPES: PRUint16
+++** PRInt16
+++** DESCRIPTION:
+++** The int16 types are known to be 16 bits each.
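+++** For example (illustrative, not part of the original header), code that
+++** relies on this guarantee can make it explicit with the PR_STATIC_ASSERT
+++** macro declared near the end of this file:
+++** PR_STATIC_ASSERT(sizeof(PRInt16) == 2);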
+++************************************************************************/
+++#if PR_BYTES_PER_SHORT == 2
+++typedef unsigned short PRUint16;
+++typedef short PRInt16;
+++#else
+++#error No suitable type for PRInt16/PRUint16
+++#endif
+++
+++/************************************************************************
+++ * MACROS: PR_INT16_MAX
+++ * PR_INT16_MIN
+++ * PR_UINT16_MAX
+++ * DESCRIPTION:
+++ * The maximum and minimum values of a PRInt16 or PRUint16.
+++************************************************************************/
+++
+++#define PR_INT16_MAX 32767
+++#define PR_INT16_MIN (-32768)
+++#define PR_UINT16_MAX 65535U
+++
+++/************************************************************************
+++** TYPES: PRUint32
+++** PRInt32
+++** DESCRIPTION:
+++** The int32 types are known to be 32 bits each.
+++************************************************************************/
+++#if PR_BYTES_PER_INT == 4
+++typedef unsigned int PRUint32;
+++typedef int PRInt32;
+++#define PR_INT32(x) x
+++#define PR_UINT32(x) x ## U
+++#elif PR_BYTES_PER_LONG == 4
+++typedef unsigned long PRUint32;
+++typedef long PRInt32;
+++#define PR_INT32(x) x ## L
+++#define PR_UINT32(x) x ## UL
+++#else
+++#error No suitable type for PRInt32/PRUint32
+++#endif
+++
+++/************************************************************************
+++ * MACROS: PR_INT32_MAX
+++ * PR_INT32_MIN
+++ * PR_UINT32_MAX
+++ * DESCRIPTION:
+++ * The maximum and minimum values of a PRInt32 or PRUint32.
+++************************************************************************/
+++
+++#define PR_INT32_MAX PR_INT32(2147483647)
+++#define PR_INT32_MIN (-PR_INT32_MAX - 1)
+++#define PR_UINT32_MAX PR_UINT32(4294967295)
+++
+++/************************************************************************
+++** TYPES: PRUint64
+++** PRInt64
+++** DESCRIPTION:
+++** The int64 types are known to be 64 bits each. Care must be used when
+++** declaring variables of type PRUint64 or PRInt64. Different hardware
+++** architectures and even different compilers have varying support for
+++** 64 bit values. The only guaranteed portability requires the use of
+++** the LL_ macros (see prlong.h).
+++**
+++** MACROS: PR_INT64
+++** PR_UINT64
+++** DESCRIPTION:
+++** The PR_INT64 and PR_UINT64 macros provide a portable way for
+++** specifying 64-bit integer constants. They can only be used if
+++** PRInt64 and PRUint64 are defined as compiler-supported 64-bit
+++** integer types (i.e., if HAVE_LONG_LONG is defined, which is true
+++** for all the supported compilers today). If PRInt64 and PRUint64
+++** are defined as structs, the LL_INIT macro defined in prlong.h has
+++** to be used.
+++**
+++** MACROS: PR_INT64_MAX
+++** PR_INT64_MIN
+++** PR_UINT64_MAX
+++** DESCRIPTION:
+++** The maximum and minimum values of a PRInt64 or PRUint64.
+++************************************************************************/
+++#ifdef HAVE_LONG_LONG
+++/* Keep this in sync with prlong.h.
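+++ * (Illustrative example: PR_INT64(1) becomes 1L, 1i64, or 1LL depending on
+++ * which branch below is taken, giving callers a portable 64-bit literal
+++ * without writing the suffix themselves.)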
+++ */
+++#if PR_BYTES_PER_LONG == 8 && !defined(PR_ALTERNATE_INT64_TYPEDEF)
+++typedef long PRInt64;
+++typedef unsigned long PRUint64;
+++#define PR_INT64(x) x ## L
+++#define PR_UINT64(x) x ## UL
+++#elif defined(WIN32) && !defined(__GNUC__)
+++typedef __int64 PRInt64;
+++typedef unsigned __int64 PRUint64;
+++#define PR_INT64(x) x ## i64
+++#define PR_UINT64(x) x ## ui64
+++#else
+++typedef long long PRInt64;
+++typedef unsigned long long PRUint64;
+++#define PR_INT64(x) x ## LL
+++#define PR_UINT64(x) x ## ULL
+++#endif /* PR_BYTES_PER_LONG == 8 */
+++
+++#define PR_INT64_MAX PR_INT64(0x7fffffffffffffff)
+++#define PR_INT64_MIN (-PR_INT64_MAX - 1)
+++#define PR_UINT64_MAX PR_UINT64(-1)
+++#else /* !HAVE_LONG_LONG */
+++typedef struct {
+++#ifdef IS_LITTLE_ENDIAN
+++ PRUint32 lo, hi;
+++#else
+++ PRUint32 hi, lo;
+++#endif
+++} PRInt64;
+++typedef PRInt64 PRUint64;
+++
+++#define PR_INT64_MAX (PRInt64){0x7fffffff, 0xffffffff}
+++#define PR_INT64_MIN (PRInt64){0xffffffff, 0xffffffff}
+++#define PR_UINT64_MAX (PRUint64){0xffffffff, 0xffffffff}
+++
+++#endif /* !HAVE_LONG_LONG */
+++
+++/************************************************************************
+++** TYPES: PRUintn
+++** PRIntn
+++** DESCRIPTION:
+++** The PRIntn types are most appropriate for automatic variables. They are
+++** guaranteed to be at least 16 bits, though various architectures may
+++** define them to be wider (e.g., 32 or even 64 bits). These types are
+++** never valid for fields of a structure.
+++************************************************************************/
+++#if PR_BYTES_PER_INT >= 2
+++typedef int PRIntn;
+++typedef unsigned int PRUintn;
+++#else
+++#error 'sizeof(int)' not sufficient for platform use
+++#endif
+++
+++/************************************************************************
+++** TYPES: PRFloat64
+++** DESCRIPTION:
+++** NSPR's floating point type is always 64 bits.
+++************************************************************************/
+++typedef double PRFloat64;
+++
+++/************************************************************************
+++** TYPES: PRSize
+++** DESCRIPTION:
+++** A type for representing the size of objects.
+++************************************************************************/
+++typedef size_t PRSize;
+++
+++
+++/************************************************************************
+++** TYPES: PROffset32, PROffset64
+++** DESCRIPTION:
+++** A type for representing byte offsets from some location.
+++************************************************************************/
+++typedef PRInt32 PROffset32;
+++typedef PRInt64 PROffset64;
+++
+++/************************************************************************
+++** TYPES: PRPtrdiff
+++** DESCRIPTION:
+++** A type for pointer difference. Variables of this type are suitable
+++** for storing a pointer or pointer subtraction.
+++************************************************************************/
+++typedef ptrdiff_t PRPtrdiff;
+++
+++/************************************************************************
+++** TYPES: PRUptrdiff
+++** DESCRIPTION:
+++** A type for pointer difference. Variables of this type are suitable
+++** for storing a pointer or pointer subtraction.
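+++** For example (illustrative): given char buf[16], the value of
+++** (PRUptrdiff)&buf[8] - (PRUptrdiff)&buf[0] is 8.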
+++************************************************************************/
+++#ifdef _WIN64
+++typedef PRUint64 PRUptrdiff;
+++#else
+++typedef unsigned long PRUptrdiff;
+++#endif
+++
+++/************************************************************************
+++** TYPES: PRBool
+++** DESCRIPTION:
+++** Use PRBool for variables and parameter types. Use PR_FALSE and PR_TRUE
+++** for clarity of target type in assignments and actual arguments. Use
+++** 'if (bool)', 'while (!bool)', '(bool) ? x : y' etc., to test booleans
+++** just as you would C int-valued conditions.
+++************************************************************************/
+++typedef PRIntn PRBool;
+++#define PR_TRUE 1
+++#define PR_FALSE 0
+++
+++/************************************************************************
+++** TYPES: PRPackedBool
+++** DESCRIPTION:
+++** Use PRPackedBool within structs where bitfields are not desirable
+++** but minimum and consistent overhead matters.
+++************************************************************************/
+++typedef PRUint8 PRPackedBool;
+++
+++/*
+++** Status code used by some routines that have a single point of failure or
+++** special status return.
+++*/
+++typedef enum { PR_FAILURE = -1, PR_SUCCESS = 0 } PRStatus;
+++
+++#ifndef __PRUNICHAR__
+++#define __PRUNICHAR__
+++#ifdef WIN32
+++typedef wchar_t PRUnichar;
+++#else
+++typedef PRUint16 PRUnichar;
+++#endif
+++#endif
+++
+++/*
+++** WARNING: The undocumented data types PRWord and PRUword are
+++** only used in the garbage collection and arena code. Do not
+++** use PRWord and PRUword in new code.
+++**
+++** A PRWord is an integer that is the same size as a void*.
+++** It implements the notion of a "word" in the Java Virtual
+++** Machine. (See Sec. 3.4 "Words", The Java Virtual Machine
+++** Specification, Addison-Wesley, September 1996.
+++** http://java.sun.com/docs/books/vmspec/index.html.)
+++*/
+++#ifdef _WIN64
+++typedef PRInt64 PRWord;
+++typedef PRUint64 PRUword;
+++#else
+++typedef long PRWord;
+++typedef unsigned long PRUword;
+++#endif
+++
+++/*
+++ * PR_PRETEND_NORETURN, specified at the end of a function declaration,
+++ * indicates that for the purposes of static analysis, this function does not
+++ * return. (The function definition does not need to be annotated.)
+++ *
+++ * void PR_Assert(const char *s, const char *file, PRIntn ln)
+++ * PR_PRETEND_NORETURN;
+++ *
+++ * Some static analyzers, like scan-build from clang, can use this information
+++ * to eliminate false positives. From the upstream documentation of
+++ * scan-build:
+++ * This attribute is useful for annotating assertion handlers that actually
+++ * can return, but for the purpose of using the analyzer we want to pretend
+++ * that such functions do not return.
+++ */
+++#ifdef __clang_analyzer__
+++#if __has_extension(attribute_analyzer_noreturn)
+++#define PR_PRETEND_NORETURN __attribute__((analyzer_noreturn))
+++#endif
+++#endif
+++
+++#ifndef PR_PRETEND_NORETURN
+++#define PR_PRETEND_NORETURN /* no support */
+++#endif
+++
+++/*
+++** Compile-time assert. "condition" must be a constant expression.
+++** The macro can be used only in places where an "extern" declaration is
+++** allowed.
+++*/
+++#define PR_STATIC_ASSERT(condition) \
+++ extern void pr_static_assert(int arg[(condition) ?
1 : -1]) +++ +++PR_END_EXTERN_C +++ +++#endif /* prtypes_h___ */ +++ ++--- /dev/null +++++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11p.h ++@@ -0,0 +1,24 @@ +++/* This Source Code Form is subject to the terms of the Mozilla Public +++ * License, v. 2.0. If a copy of the MPL was not distributed with this +++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +++/* +++ * Copyright (C) 1994-1999 RSA Security Inc. Licence to copy this document +++ * is granted provided that it is identified as "RSA Security Inc. Public-Key +++ * Cryptography Standards (PKCS)" in all material mentioning or referencing +++ * this document. +++ */ +++/* these data types are platform/implementation dependent. */ +++/* +++ * Packing was removed from the shipped RSA header files, even +++ * though it's still needed. put in a central file to help merging.. +++ */ +++ +++#if defined(_WIN32) || defined(_WINDOWS) +++#ifdef __clang__ +++#pragma clang diagnostic ignored "-Wpragma-pack" +++#endif +++#ifdef _MSC_VER +++#pragma warning(disable : 4103) +++#endif +++#pragma pack(push, cryptoki, 1) +++#endif ++--- a/src/jdk.crypto.cryptoki/unix/native/libj2pkcs11/p11_md.h +++++ b/src/jdk.crypto.cryptoki/unix/native/libj2pkcs11/p11_md.h ++@@ -63,11 +63,6 @@ ++ #ifndef _P11_MD_H ++ #define _P11_MD_H 1 ++ ++-#define CK_PTR * ++-#define CK_DEFINE_FUNCTION(returnType, name) returnType name ++-#define CK_DECLARE_FUNCTION(returnType, name) returnType name ++-#define CK_DECLARE_FUNCTION_POINTER(returnType, name) returnType (* name) ++-#define CK_CALLBACK_FUNCTION(returnType, name) returnType (* name) ++ #ifndef NULL_PTR ++ #define NULL_PTR 0 ++ #endif ++--- a/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11wrapper.h +++++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11wrapper.h ++@@ -172,8 +172,8 @@ ++ #define min(a, b) (((a) < (b)) ? (a) : (b)) ++ #endif ++ ++-#define ckBBoolToJBoolean(x) ((x == TRUE) ? JNI_TRUE : JNI_FALSE); ++-#define jBooleanToCKBBool(x) ((x == JNI_TRUE) ? TRUE : FALSE); +++#define ckBBoolToJBoolean(x) ((x == CK_TRUE) ? JNI_TRUE : JNI_FALSE); +++#define jBooleanToCKBBool(x) ((x == JNI_TRUE) ? CK_TRUE : CK_FALSE); ++ ++ #define ckByteToJByte(x) ((jbyte) x) ++ #define jByteToCKByte(x) ((CK_BYTE) x) ++--- a/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/p11_convert.c +++++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/p11_convert.c ++@@ -1543,7 +1543,9 @@ CK_VOID_PTR jMechParamToCKMechParamPtrSl ++ ckpParamPtr = jX942Dh2DeriveParamToCKX942Dh2DeriveParamPtr(env, jParam, ckpLength); ++ break; ++ // defined by pkcs11.h but we don't support +++#if 0 ++ case CKM_KEA_DERIVE: // CK_KEA_DERIVE_PARAMS +++#endif ++ case CKM_RC2_CBC: // CK_RC2_CBC_PARAMS ++ case CKM_RC2_MAC_GENERAL: // CK_RC2_MAC_GENERAL_PARAMS ++ case CKM_RC5_ECB: // CK_RC5_PARAMS ++--- a/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/p11_util.c +++++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/p11_util.c ++@@ -407,7 +407,9 @@ void freeCKMechanismPtr(CK_MECHANISM_PTR ++ case CKM_TLS_MAC: ++ case CKM_AES_CTR: ++ case CKM_RSA_PKCS_PSS: +++#if 0 ++ case CKM_CAMELLIA_CTR: +++#endif ++ // params do not contain pointers ++ break; ++ default: diff --cc debian/patches/reproducible-build-jmod.diff index 000000000,000000000..9287270de new file mode 100644 --- /dev/null +++ b/debian/patches/reproducible-build-jmod.diff @@@ -1,0 -1,0 +1,39 @@@ ++Description: jlink: Hash of module differs to expected hash recorded in java.base ++ The cause is the use of dh_strip_nondeterminism late in the build ++ process. 
This reorganises the jmod files, which in turn changes their ++ SHA256 checksums. This would not be a problem, except that the ++ checksums are saved in java.base.jmod *before* the use of ++ dh_strip_nondeterminism. Performing this stripping immediately after ++ each jmod file is created results in the checksums being consistent ++ throughout. ++Author: Julian Gilbey ++Bug-Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=944738 ++Forwarded: not-needed ++ ++--- a/make/CreateJmods.gmk +++++ b/make/CreateJmods.gmk ++@@ -230,6 +230,15 @@ ++ ++ # Create jmods in the support dir and then move them into place to keep the ++ # module path in $(IMAGES_OUTPUTDIR)/jmods valid at all times. +++# strip-nondeterminism requires the same timestamp as +++# dh_strip_nondeterminism uses, so we determine this first. +++# Fall back to the original behavior if the tools are missing for backports +++DSN_TIMESTAMP := $(shell perl -MDebian::Debhelper::Dh_Lib -e 'print get_source_date_epoch()' 2>/dev/null) +++ifneq (,$(DSN_TIMESTAMP)) +++ ifneq (,$(wildcard /bin/strip-nondeterminism /usr/bin/strip-nondeterminism)) +++ use_strip_ndt = yes +++ endif +++endif ++ $(eval $(call SetupExecute, create_$(JMOD_FILE), \ ++ WARN := Creating $(INTERIM_MSG)$(JMOD_FILE), \ ++ DEPS := $(DEPS), \ ++@@ -241,7 +250,7 @@ ++ --module-path $(JMODS_DIR) $(JMOD_FLAGS) \ ++ $(JMOD_SOURCE_DATE) \ ++ $(JMODS_SUPPORT_DIR)/$(JMOD_FILE), \ ++- POST_COMMAND := $(MV) $(JMODS_SUPPORT_DIR)/$(JMOD_FILE) $(JMODS_DIR)/$(JMOD_FILE), \ +++ POST_COMMAND := $(if $(use_strip_ndt),strip-nondeterminism --timestamp $(DSN_TIMESTAMP) $(JMODS_SUPPORT_DIR)/$(JMOD_FILE) && )$(MV) $(JMODS_SUPPORT_DIR)/$(JMOD_FILE) $(JMODS_DIR)/$(JMOD_FILE), \ ++ )) ++ ++ TARGETS += $(create_$(JMOD_FILE)) diff --cc debian/patches/reproducible-character-data.diff index 000000000,000000000..140c52d6b new file mode 100644 --- /dev/null +++ b/debian/patches/reproducible-character-data.diff @@@ -1,0 -1,0 +1,14 @@@ ++Description: Makes the generated character data classes reproducible (removes a timestamp and trims a build path captured in the comments) ++Author: Emmanuel Bourg ++Forwarded: no ++--- a/make/jdk/src/classes/build/tools/generatecharacter/GenerateCharacter.java +++++ b/make/jdk/src/classes/build/tools/generatecharacter/GenerateCharacter.java ++@@ -1729,7 +1729,7 @@ ++ } ++ commentStart = (Csyntax ? "/*" : "//"); ++ commentEnd = (Csyntax ? 
" */" : ""); ++- commandLineDescription = desc.toString().replace("\\", "\\\\"); +++ commandLineDescription = desc.toString().replace("\\", "\\\\").replace(System.getProperty("user.dir").replace("make/gensrc", ""), ""); ++ } ++ ++ private static void searchBins(long[] map, int binsOccupied) throws Exception { diff --cc debian/patches/reproducible-copyright-headers.diff index 000000000,000000000..bba2371d6 new file mode 100644 --- /dev/null +++ b/debian/patches/reproducible-copyright-headers.diff @@@ -1,0 -1,0 +1,64 @@@ ++Description: Makes the generated copyright headers reproducible ++Author: Emmanuel Bourg ++Forwarded: no ++--- a/make/jdk/src/classes/build/tools/cldrconverter/CopyrightHeaders.java +++++ b/make/jdk/src/classes/build/tools/cldrconverter/CopyrightHeaders.java ++@@ -26,6 +26,7 @@ ++ package build.tools.cldrconverter; ++ ++ import java.util.Calendar; +++import java.util.Date; ++ import java.util.GregorianCalendar; ++ import java.util.Locale; ++ import java.util.TimeZone; ++@@ -146,8 +147,14 @@ class CopyrightHeaders { ++ } ++ ++ private static int getYear() { ++- return new GregorianCalendar(TimeZone.getTimeZone("America/Los_Angeles"), ++- Locale.US).get(Calendar.YEAR); +++ Date date = new Date(); +++ if (System.getenv("SOURCE_DATE_EPOCH") != null) { +++ date = new Date(1000 * Long.valueOf(System.getenv("SOURCE_DATE_EPOCH"))); +++ } +++ +++ GregorianCalendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"), Locale.ENGLISH); +++ calendar.setTime(date); +++ return calendar.get(Calendar.YEAR); ++ } ++ ++ // no instantiation ++--- a/make/jdk/src/classes/build/tools/generatelsrequivmaps/EquivMapsGenerator.java +++++ b/make/jdk/src/classes/build/tools/generatelsrequivmaps/EquivMapsGenerator.java ++@@ -34,9 +34,13 @@ import java.time.ZoneId; ++ import java.time.ZonedDateTime; ++ import java.util.ArrayList; ++ import java.util.Arrays; +++import java.util.Calendar; +++import java.util.Date; +++import java.util.GregorianCalendar; ++ import java.util.List; ++ import java.util.Locale; ++ import java.util.Map; +++import java.util.TimeZone; ++ import java.util.TreeMap; ++ import java.util.stream.Collectors; ++ ++@@ -246,8 +250,15 @@ public class EquivMapsGenerator { ++ + "}"; ++ ++ private static String getOpenJDKCopyright() { ++- int year = ZonedDateTime.now(ZoneId ++- .of("America/Los_Angeles")).getYear(); +++ Date date = new Date(); +++ if (System.getenv("SOURCE_DATE_EPOCH") != null) { +++ date = new Date(1000 * Long.valueOf(System.getenv("SOURCE_DATE_EPOCH"))); +++ } +++ +++ GregorianCalendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"), Locale.ENGLISH); +++ calendar.setTime(date); +++ +++ int year = calendar.get(Calendar.YEAR); ++ return String.format(Locale.US, COPYRIGHT, year); ++ } ++ diff --cc debian/patches/reproducible-javadoc-timestamp.diff index 000000000,000000000..8cabed1f9 new file mode 100644 --- /dev/null +++ b/debian/patches/reproducible-javadoc-timestamp.diff @@@ -1,0 -1,0 +1,41 @@@ ++Description: Makes the timestamp in the javadoc files reproducible when SOURCE_DATE_EPOCH is specified ++Author: Emmanuel Bourg ++Forwarded: no ++--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/markup/Head.java +++++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/markup/Head.java ++@@ -239,6 +239,9 @@ public class Head { ++ */ ++ public Content toContent() { ++ Date now = showTimestamp ? 
calendar.getTime() : null; +++ if (now != null && System.getenv("SOURCE_DATE_EPOCH") != null) { +++ now = new Date(1000 * Long.parseLong(System.getenv("SOURCE_DATE_EPOCH"))); +++ } ++ ++ HtmlTree tree = new HtmlTree(HtmlTag.HEAD); ++ tree.add(getGeneratedBy(showTimestamp, now)); ++@@ -250,6 +253,9 @@ public class Head { ++ ++ if (showTimestamp) { ++ SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd"); +++ if (System.getenv("SOURCE_DATE_EPOCH") != null) { +++ dateFormat.setTimeZone(TimeZone.getTimeZone("UTC")); +++ } ++ tree.add(HtmlTree.META("dc.created", dateFormat.format(now))); ++ } ++ ++@@ -282,7 +288,14 @@ public class Head { ++ private Comment getGeneratedBy(boolean timestamp, Date now) { ++ String text = "Generated by javadoc"; // marker string, deliberately not localized ++ if (timestamp) { ++- text += " ("+ docletVersion + ") on " + now; +++ text += " ("+ docletVersion + ") on "; +++ if (System.getenv("SOURCE_DATE_EPOCH") == null) { +++ text += now; +++ } else { +++ SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z"); +++ fmt.setTimeZone(TimeZone.getTimeZone("UTC")); +++ text += fmt.format(now); +++ } ++ } ++ return new Comment(text); ++ } diff --cc debian/patches/reproducible-module-info.diff index 000000000,000000000..1d18991bc new file mode 100644 --- /dev/null +++ b/debian/patches/reproducible-module-info.diff @@@ -1,0 -1,0 +1,18 @@@ ++Description: Makes the generated module-info.java files reproducible (removes a captured build path) ++Author: Emmanuel Bourg ++Forwarded: no ++--- a/make/jdk/src/classes/build/tools/module/GenModuleInfoSource.java +++++ b/make/jdk/src/classes/build/tools/module/GenModuleInfoSource.java ++@@ -156,9 +156,10 @@ ++ if (l.trim().startsWith("module ")) { ++ if (debug) { ++ // print URI rather than file path to avoid escape ++- writer.format(" // source file: %s%n", sourceFile.toUri()); +++ String buildPath = System.getProperty("user.dir").replaceAll("make$", ""); +++ writer.format(" // source file: %s%n", sourceFile.toUri().toString().replace(buildPath, "")); ++ for (Path file : extraFiles) { ++- writer.format(" // %s%n", file.toUri()); +++ writer.format(" // %s%n", file.toUri().toString().replace(buildPath, "")); ++ } ++ } ++ break; diff --cc debian/patches/reproducible-properties-timestamp.diff index 000000000,000000000..c0ec2672f new file mode 100644 --- /dev/null +++ b/debian/patches/reproducible-properties-timestamp.diff @@@ -1,0 -1,0 +1,37 @@@ ++Description: Makes the timestamp in the properties files header reproducible when SOURCE_DATE_EPOCH is specified ++Author: Emmanuel Bourg ++Forwarded: no ++--- a/src/java.base/share/classes/java/util/Properties.java +++++ b/src/java.base/share/classes/java/util/Properties.java ++@@ -903,7 +903,7 @@ ++ if (comments != null) { ++ writeComments(bw, comments); ++ } ++- bw.write("#" + new Date().toString()); +++ bw.write("#" + getFormattedTimestamp()); ++ bw.newLine(); ++ synchronized (this) { ++ for (Map.Entry e : entrySet()) { ++@@ -1555,4 +1555,22 @@ ++ } ++ this.map = map; ++ } +++ +++ /** +++ * Returns a formatted timestamp to be used in the properties file header. +++ * The date used is the current date, unless the SOURCE_DATE_EPOCH +++ * environment variable is specified. In this case the format used is +++ * locale and timezone insensitive to ensure the output is reproducible. 
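+++ * For example (illustrative): a build run with SOURCE_DATE_EPOCH=1690000000
+++ * always writes the header line "#2023-07-22 04:26:40 UTC", whatever the
+++ * build machine's locale or timezone.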
+++ */ +++ private String getFormattedTimestamp() { +++ if (System.getenv("SOURCE_DATE_EPOCH") == null) { +++ return new Date().toString(); +++ } else { +++ // Use the SOURCE_DATE_EPOCH timestamp and make the format locale/timezone insensitive +++ java.text.SimpleDateFormat fmt = new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss z", java.util.Locale.ENGLISH); +++ fmt.setTimeZone(java.util.TimeZone.getTimeZone("UTC")); +++ Date date = new Date(1000 * Long.parseLong(System.getenv("SOURCE_DATE_EPOCH"))); +++ return fmt.format(date); +++ } +++ } ++ } diff --cc debian/patches/s390x-opt.diff index 000000000,000000000..33ea6ffd4 new file mode 100644 --- /dev/null +++ b/debian/patches/s390x-opt.diff @@@ -1,0 -1,0 +1,12 @@@ ++--- a/make/autoconf/flags-cflags.m4 +++++ b/make/autoconf/flags-cflags.m4 ++@@ -739,6 +739,9 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_CPU_DEP], ++ fi ++ elif test "x$FLAGS_CPU" = xs390x; then ++ $1_CFLAGS_CPU="-mbackchain -march=z10" +++ if test "x$VENDOR_FLAGS_CPU" != x; then +++ $1_CFLAGS_CPU="-mbackchain $VENDOR_FLAGS_CPU" +++ fi ++ fi ++ ++ if test "x$FLAGS_CPU_ARCH" != xarm && test "x$FLAGS_CPU_ARCH" != xppc; then diff --cc debian/patches/series index 000000000,000000000..826f7e386 new file mode 100644 --- /dev/null +++ b/debian/patches/series @@@ -1,0 -1,0 +1,39 @@@ ++system-pcsclite.diff ++hotspot-mips-align.diff ++icc_loading_with_symlink.diff ++icedtea-override-redirect-compiz.diff ++libpcsclite-dlopen.diff ++default-jvm-cfg.diff ++adlc-parser.diff ++multiple-pkcs11-library-init.diff ++s390x-opt.diff ++jdk-getAccessibleValue.diff ++jtreg-location.diff ++jdk-i18n-pt_BR.diff ++8199220.diff ++machine-flag.diff ++zero-x32.diff ++hotspot-disable-exec-shield-workaround.diff ++atk-wrapper-security.diff ++dnd-files.diff ++m68k-support.diff ++reproducible-properties-timestamp.diff ++# reproducible-javadoc-timestamp.diff ++make-debug-print.diff ++Don-t-optimize-fdlibm-fork-for-Zero-on-linux-sparc-Z.patch ++keep-gtk2-as-default.patch ++reproducible-character-data.diff ++reproducible-module-info.diff ++#reproducible-copyright-headers.diff ++reproducible-build-jmod.diff ++mips.diff ++#nspr+nss-headers.diff ++build_gtest.patch ++update-assertion-for-armhf.patch ++misalign-pointer-for-armhf.patch ++log-generated-classes-test.patch ++update-permission-test.patch ++ldap-timeout-test-use-ip.patch ++test-use-ip-address.patch ++exclude-broken-tests.patch ++8276799.diff diff --cc debian/patches/system-pcsclite.diff index 000000000,000000000..5d5aeea4b new file mode 100644 --- /dev/null +++ b/debian/patches/system-pcsclite.diff @@@ -1,0 -1,0 +1,195 @@@ ++--- a/make/autoconf/lib-bundled.m4 +++++ b/make/autoconf/lib-bundled.m4 ++@@ -41,6 +41,7 @@ ++ LIB_SETUP_ZLIB ++ LIB_SETUP_LCMS ++ LIB_SETUP_HARFBUZZ +++ LIB_SETUP_PCSCLITE ++ ]) ++ ++ ################################################################################ ++@@ -307,3 +308,41 @@ ++ AC_SUBST(HARFBUZZ_CFLAGS) ++ AC_SUBST(HARFBUZZ_LIBS) ++ ]) +++ +++################################################################################ +++# Setup pcsclite +++################################################################################ +++AC_DEFUN_ONCE([LIB_SETUP_PCSCLITE], +++[ +++ AC_ARG_WITH(pcsclite, [AS_HELP_STRING([--with-pcsclite], +++ [use pcsclite from build system or OpenJDK source (system, bundled) @<:@bundled@:>@])]) +++ +++ AC_MSG_CHECKING([for which pcsclite to use]) +++ +++ # default is bundled +++ DEFAULT_PCSCLITE=bundled +++ # if user didn't specify, use DEFAULT_PCSCLITE +++ if test "x${with_pcsclite}" = "x"; then +++ 
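+++ # (explanatory note: no --with-pcsclite option was given, e.g. a plain
+++ # ./configure run, so fall through to the default selected above)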
+++ with_pcsclite=${DEFAULT_PCSCLITE}
+++ fi
+++
+++ if test "x${with_pcsclite}" = "xbundled"; then
+++ USE_EXTERNAL_PCSCLITE=false
+++ AC_MSG_RESULT([bundled])
+++ elif test "x${with_pcsclite}" = "xsystem"; then
+++ PKG_CHECK_MODULES(PCSCLITE, libpcsclite,
+++ [ PCSCLITE_FOUND=yes ],
+++ [ PCSCLITE_FOUND=no ])
+++ if test "x${PCSCLITE_FOUND}" = "xyes"; then
+++ USE_EXTERNAL_PCSCLITE=true
+++ AC_MSG_RESULT([system])
+++ else
+++ AC_MSG_RESULT([system not found])
+++ AC_MSG_ERROR([--with-pcsclite=system specified, but no pcsclite found!])
+++ fi
+++ else
+++ AC_MSG_ERROR([Invalid value of --with-pcsclite: ${with_pcsclite}, use 'system' or 'bundled'])
+++ fi
+++
+++ AC_SUBST(USE_EXTERNAL_PCSCLITE)
+++])
++--- a/make/modules/java.smartcardio/Lib.gmk
+++++ b/make/modules/java.smartcardio/Lib.gmk
++@@ -30,12 +30,12 @@
++ $(eval $(call SetupJdkLibrary, BUILD_LIBJ2PCSC, \
++ NAME := j2pcsc, \
++ CFLAGS := $(CFLAGS_JDKLIB), \
++- CFLAGS_unix := -D__sun_jdk, \
++- EXTRA_HEADER_DIRS := libj2pcsc/MUSCLE, \
+++ CFLAGS_unix := -D__sun_jdk -DUSE_SYSTEM_LIBPCSCLITE, \
+++ EXTRA_HEADER_DIRS := /usr/include/PCSC, \
++ OPTIMIZATION := LOW, \
++ LDFLAGS := $(LDFLAGS_JDKLIB) \
++ $(call SET_SHARED_LIBRARY_ORIGIN), \
++- LIBS_unix := $(LIBDL), \
+++ LIBS_unix := -lpcsclite $(LIBDL), \
++ LIBS_windows := winscard.lib, \
++ ))
++
++--- a/make/autoconf/spec.gmk.in
+++++ b/make/autoconf/spec.gmk.in
++@@ -774,6 +774,7 @@
++ # Build setup
++ USE_EXTERNAL_LIBJPEG:=@USE_EXTERNAL_LIBJPEG@
++ USE_EXTERNAL_LIBGIF:=@USE_EXTERNAL_LIBGIF@
+++USE_EXTERNAL_LIBPCSCLITE:=@USE_EXTERNAL_LIBPCSCLITE@
++ USE_EXTERNAL_LIBZ:=@USE_EXTERNAL_LIBZ@
++ LIBZ_CFLAGS:=@LIBZ_CFLAGS@
++ LIBZ_LIBS:=@LIBZ_LIBS@
++--- a/src/java.smartcardio/unix/native/libj2pcsc/pcsc_md.c
+++++ b/src/java.smartcardio/unix/native/libj2pcsc/pcsc_md.c
++@@ -36,6 +36,7 @@
++
++ #include "pcsc_md.h"
++
+++#ifndef USE_SYSTEM_LIBPCSCLITE
++ void *hModule;
++ FPTR_SCardEstablishContext scardEstablishContext;
++ FPTR_SCardConnect scardConnect;
++@@ -47,6 +48,7 @@
++ FPTR_SCardBeginTransaction scardBeginTransaction;
++ FPTR_SCardEndTransaction scardEndTransaction;
++ FPTR_SCardControl scardControl;
+++#endif
++
++ /*
++ * Throws a Java Exception by name
++@@ -75,7 +77,9 @@
++ throwByName(env, "java/io/IOException", msg);
++ }
++
+++#ifndef USE_SYSTEM_LIBPCSCLITE
++ void *findFunction(JNIEnv *env, void *hModule, char *functionName) {
+++ return NULL;
++ void *fAddress = dlsym(hModule, functionName);
++ if (fAddress == NULL) {
++ char errorMessage[256];
++@@ -85,9 +89,11 @@
++ }
++ return fAddress;
++ }
+++#endif
++
++ JNIEXPORT void JNICALL Java_sun_security_smartcardio_PlatformPCSC_initialize
++ (JNIEnv *env, jclass thisClass, jstring jLibName) {
+++#ifndef USE_SYSTEM_LIBPCSCLITE
++ const char *libName = (*env)->GetStringUTFChars(env, jLibName, NULL);
++ if (libName == NULL) {
++ throwNullPointerException(env, "PCSC library name is null");
++@@ -141,4 +147,5 @@
++ #else
++ scardControl = (FPTR_SCardControl) findFunction(env, hModule, "SCardControl132");
++ #endif // __APPLE__
+++#endif
++ }
++--- a/src/java.smartcardio/unix/native/libj2pcsc/pcsc_md.h
+++++ b/src/java.smartcardio/unix/native/libj2pcsc/pcsc_md.h
++@@ -23,6 +23,8 @@
++ * questions.
++ */ ++ +++#ifndef USE_SYSTEM_LIBPCSCLITE +++ ++ typedef LONG (*FPTR_SCardEstablishContext)(DWORD dwScope, ++ LPCVOID pvReserved1, ++ LPCVOID pvReserved2, ++@@ -111,3 +113,41 @@ ++ extern FPTR_SCardBeginTransaction scardBeginTransaction; ++ extern FPTR_SCardEndTransaction scardEndTransaction; ++ extern FPTR_SCardControl scardControl; +++ +++#else +++ +++#define CALL_SCardEstablishContext(dwScope, pvReserved1, pvReserved2, phContext) \ +++ (SCardEstablishContext(dwScope, pvReserved1, pvReserved2, phContext)) +++ +++#define CALL_SCardConnect(hContext, szReader, dwSharedMode, dwPreferredProtocols, phCard, pdwActiveProtocols) \ +++ (SCardConnect(hContext, szReader, dwSharedMode, dwPreferredProtocols, phCard, pdwActiveProtocols)) +++ +++#define CALL_SCardDisconnect(hCard, dwDisposition) \ +++ (SCardDisconnect(hCard, dwDisposition)) +++ +++#define CALL_SCardStatus(hCard, mszReaderNames, pcchReaderLen, pdwState, pdwProtocol, pbAtr, pcbAtrLen) \ +++ (SCardStatus(hCard, mszReaderNames, pcchReaderLen, pdwState, pdwProtocol, pbAtr, pcbAtrLen)) +++ +++#define CALL_SCardGetStatusChange(hContext, dwTimeout, rgReaderStates, cReaders) \ +++ (SCardGetStatusChange(hContext, dwTimeout, rgReaderStates, cReaders)) +++ +++#define CALL_SCardTransmit(hCard, pioSendPci, pbSendBuffer, cbSendLength, \ +++ pioRecvPci, pbRecvBuffer, pcbRecvLength) \ +++ (SCardTransmit(hCard, pioSendPci, pbSendBuffer, cbSendLength, \ +++ pioRecvPci, pbRecvBuffer, pcbRecvLength)) +++ +++#define CALL_SCardListReaders(hContext, mszGroups, mszReaders, pcchReaders) \ +++ (SCardListReaders(hContext, mszGroups, mszReaders, pcchReaders)) +++ +++#define CALL_SCardBeginTransaction(hCard) \ +++ (SCardBeginTransaction(hCard)) +++ +++#define CALL_SCardEndTransaction(hCard, dwDisposition) \ +++ (SCardEndTransaction(hCard, dwDisposition)) +++ +++#define CALL_SCardControl(hCard, dwControlCode, pbSendBuffer, cbSendLength, \ +++ pbRecvBuffer, pcbRecvLength, lpBytesReturned) \ +++ (SCardControl(hCard, dwControlCode, pbSendBuffer, cbSendLength, \ +++ pbRecvBuffer, pcbRecvLength, lpBytesReturned)) +++ +++#endif ++--- a/test/jdk/java/security/misc/Versions.java +++++ b/test/jdk/java/security/misc/Versions.java ++@@ -55,12 +55,7 @@ ++ Pattern.compile("list/(?[0-9a-f]+)/public_suffix_list.dat"), ++ "src/java.base/share/legal/public_suffix.md", ++ Pattern.compile("list/(?[0-9a-f]+)/public_suffix_list.dat"), ++- "java.base/public_suffix.md"}, ++- {"src/java.smartcardio/unix/native/libj2pcsc/MUSCLE/pcsclite.h", ++- Pattern.compile("#define PCSCLITE_VERSION_NUMBER +\"(?[0-9\\.]+)\""), ++- "src/java.smartcardio/unix/legal/pcsclite.md", ++- Pattern.compile("## PC/SC Lite v(?[0-9\\.]+)"), ++- "java.smartcardio/pcsclite.md"} +++ "java.base/public_suffix.md"} ++ }; ++ } ++ diff --cc debian/patches/test-use-ip-address.patch index 000000000,000000000..02acac7d0 new file mode 100644 --- /dev/null +++ b/debian/patches/test-use-ip-address.patch @@@ -1,0 -1,0 +1,28 @@@ ++Description: Use IP address in tests ++ Ubuntu build machines are not set up for dns. Use IP addresses to get correct test ++ outcomes. 
++Author: Vladimir Petko ++Forwarded: not-needed ++Last-Update: 2023-03-27 ++--- a/test/jdk/java/net/InetAddress/IsReachableViaLoopbackTest.java +++++ b/test/jdk/java/net/InetAddress/IsReachableViaLoopbackTest.java ++@@ -13,7 +13,7 @@ ++ public static void main(String[] args) { ++ try { ++ InetAddress addr = InetAddress.getByName("localhost"); ++- InetAddress remoteAddr = InetAddress.getByName("bugs.openjdk.java.net"); +++ InetAddress remoteAddr = InetAddress.getByName("8.8.8.8"); ++ if (!addr.isReachable(10000)) ++ throw new RuntimeException("Localhost should always be reachable"); ++ NetworkInterface inf = NetworkInterface.getByInetAddress(addr); ++--- a/test/jdk/java/net/InetAddress/getOriginalHostName.java +++++ b/test/jdk/java/net/InetAddress/getOriginalHostName.java ++@@ -39,7 +39,7 @@ ++ SharedSecrets.getJavaNetInetAddressAccess(); ++ ++ public static void main(String[] args) throws Exception { ++- final String HOST = "dummyserver.java.net"; +++ final String HOST = "localhost"; ++ InetAddress ia = null; ++ ia = InetAddress.getByName(HOST); ++ testInetAddress(ia, HOST); diff --cc debian/patches/update-assertion-for-armhf.patch index 000000000,000000000..582974ca2 new file mode 100644 --- /dev/null +++ b/debian/patches/update-assertion-for-armhf.patch @@@ -1,0 -1,0 +1,15 @@@ ++Description: armhf does not support frame unwinding, simplify assertion ++ os::current_frame() is stubbed out on armhf and this test fails. ++ Simplify assertion as there is no "from xxxx" output on armhf. ++Author: Vladimir Petko ++Bug: https://bugs.openjdk.org/browse/JDK-8305480 ++--- a/test/hotspot/jtreg/runtime/NMT/VirtualAllocCommitMerge.java +++++ b/test/hotspot/jtreg/runtime/NMT/VirtualAllocCommitMerge.java ++@@ -320,6 +320,6 @@ ++ public static void checkCommitted(OutputAnalyzer output, long addr, long size, String sizeString) { ++ output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" ++ + Long.toHexString(addr + size) ++- + "\\] committed " + sizeString + " from.*"); +++ + "\\] committed " + sizeString + ".*"); ++ } ++ } diff --cc debian/patches/update-permission-test.patch index 000000000,000000000..d1437d507 new file mode 100644 --- /dev/null +++ b/debian/patches/update-permission-test.patch @@@ -1,0 -1,0 +1,51 @@@ ++Description: Add missing permissions for TestNG 7.7 ++ TestNG has added more configuration properties, allow them in the minimal policy ++Author: Vladimir Petko ++Last-Update: 2023-03-24 ++--- a/test/jdk/java/lang/ProcessHandle/PermissionTest.java +++++ b/test/jdk/java/lang/ProcessHandle/PermissionTest.java ++@@ -219,6 +219,7 @@ ++ permissions.add(new PropertyPermission("testng.mode.dryrun", "read")); ++ permissions.add(new PropertyPermission("testng.report.xml.name", "read")); ++ permissions.add(new PropertyPermission("testng.timezone", "read")); +++ permissions.add(new PropertyPermission("testng.default.verbose", "read")); ++ permissions.add(new ReflectPermission("suppressAccessChecks")); ++ permissions.add(new FilePermission("<>", "execute")); ++ } ++--- a/test/jdk/java/sql/testng/util/TestPolicy.java +++++ b/test/jdk/java/sql/testng/util/TestPolicy.java ++@@ -114,6 +114,8 @@ ++ permissions.add(new PropertyPermission("testng.mode.dryrun", "read")); ++ permissions.add(new PropertyPermission("testng.report.xml.name", "read")); ++ permissions.add(new PropertyPermission("testng.timezone", "read")); +++ permissions.add(new PropertyPermission("testng.default.verbose", "read")); +++ permissions.add(new PropertyPermission("testng.ignore.callback.skip", "read")); ++ 
permissions.add(new ReflectPermission("suppressAccessChecks")); ++ permissions.add(new FilePermission("<>", ++ "read, write, delete")); ++--- a/test/jaxp/javax/xml/jaxp/libs/jaxp/library/JAXPPolicyManager.java +++++ b/test/jaxp/javax/xml/jaxp/libs/jaxp/library/JAXPPolicyManager.java ++@@ -35,6 +35,7 @@ ++ import java.util.HashMap; ++ import java.util.Map; ++ import java.util.PropertyPermission; +++import java.lang.reflect.ReflectPermission; ++ import java.util.Set; ++ import java.util.StringJoiner; ++ ++@@ -110,6 +111,15 @@ ++ addPermission(new SecurityPermission("setPolicy")); ++ addPermission(new RuntimePermission("setSecurityManager")); ++ addPermission(new PropertyPermission("test.src", "read")); +++ addPermission(new PropertyPermission("testng.show.stack.frames", "read")); +++ addPermission(new PropertyPermission("testng.thread.affinity", "read")); +++ addPermission(new PropertyPermission("testng.memory.friendly", "read")); +++ addPermission(new PropertyPermission("testng.mode.dryrun", "read")); +++ addPermission(new PropertyPermission("testng.report.xml.name", "read")); +++ addPermission(new PropertyPermission("testng.timezone", "read")); +++ addPermission(new PropertyPermission("testng.default.verbose", "read")); +++ addPermission(new PropertyPermission("testng.ignore.callback.skip", "read")); +++ addPermission(new ReflectPermission("suppressAccessChecks")); ++ } ++ ++ /* diff --cc debian/patches/zero-x32.diff index 000000000,000000000..d6387e446 new file mode 100644 --- /dev/null +++ b/debian/patches/zero-x32.diff @@@ -1,0 -1,0 +1,13 @@@ ++--- a/make/autoconf/platform.m4 +++++ b/make/autoconf/platform.m4 ++@@ -41,6 +41,10 @@ ++ VAR_CPU_ARCH=x86 ++ VAR_CPU_BITS=64 ++ VAR_CPU_ENDIAN=little +++ case "$host" in *x32) +++ VAR_CPU=x32 +++ VAR_CPU_BITS=32 +++ esac ++ ;; ++ i?86) ++ VAR_CPU=x86 diff --cc debian/rules index 000000000,000000000..79f2e233a new file mode 100755 --- /dev/null +++ b/debian/rules @@@ -1,0 -1,0 +1,1941 @@@ ++#!/usr/bin/make -f ++# -*- makefile -*- ++ ++# Uncomment this to turn on verbose mode. ++#export DH_VERBOSE=1 ++ ++unexport LANG LC_ALL ++ ++dh_version := $(shell dpkg-query -f '$${Version}\n' -W debhelper | sed -n 's/^\(.\).*/\1/p') ++ ++# using brace expansion and substring replacements (${var:0:2}). 
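++# (illustrative: with v=17.0.8, bash expands ${v:0:2} to "17"; plain POSIX
++# sh has no substring expansion and would reject it)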
++SHELL = /bin/bash ++ ++# use gnu time in verbose mode if available, otherwise default to the shell builtin ++ifneq (,$(wildcard /usr/bin/time)) ++ TIME = /usr/bin/time -v ++else ++ TIME = time ++endif ++ ++vafilt = $(subst $(2)=,,$(filter $(2)=%,$(1))) ++ ++DPKG_VARS := $(shell dpkg-architecture) ++DEB_HOST_GNU_TYPE ?= $(call vafilt,$(DPKG_VARS),DEB_HOST_GNU_TYPE) ++DEB_BUILD_GNU_TYPE ?= $(call vafilt,$(DPKG_VARS),DEB_BUILD_GNU_TYPE) ++DEB_HOST_ARCH ?= $(call vafilt,$(DPKG_VARS),DEB_HOST_ARCH) ++DEB_BUILD_ARCH ?= $(call vafilt,$(DPKG_VARS),DEB_BUILD_ARCH) ++DEB_HOST_ARCH_CPU ?= $(call vafilt,$(DPKG_VARS),DEB_HOST_ARCH_CPU) ++DEB_HOST_MULTIARCH ?= $(call vafilt,$(DPKG_VARS),DEB_HOST_MULTIARCH) ++ ++PATH := $(CURDIR)/bin:$(PATH) ++export PATH ++ ++CHANGELOG_VARS := $(shell dpkg-parsechangelog | sed -n 's/ /_/g;/^[^_]/s/^\([^:]*\):_\(.*\)/\1=\2/p') ++PKGSOURCE := $(call vafilt,$(CHANGELOG_VARS),Source) ++PKGVERSION := $(call vafilt,$(CHANGELOG_VARS),Version) ++PKGYEAR := $(shell echo $(call vafilt,$(CHANGELOG_VARS),Timestamp) | date --utc +%Y) ++ ++distribution := $(shell lsb_release --id --short) ++distrel := $(shell lsb_release --codename --short) ++derivative := $(shell if dpkg-vendor --derives-from Ubuntu; then echo Ubuntu; \ ++ elif dpkg-vendor --derives-from Debian; then echo Debian; \ ++ else echo Unknown; fi) ++ifeq ($(distrel),n/a) ++ distrel := sid ++endif ++ ++ifneq (,$(filter $(distrel),precise)) ++ OBJCOPY := objcopy ++ STRIP := strip ++else ++ OBJCOPY := $(DEB_HOST_GNU_TYPE)-objcopy ++ STRIP := $(DEB_HOST_GNU_TYPE)-strip ++endif ++ ++multiarch_dir = /$(DEB_HOST_MULTIARCH) ++ ++hotspot_archs = amd64 i386 arm64 armhf ppc64 ppc64el riscv64 s390x ++jtreg_archs = $(hotspot_archs) alpha ia64 powerpc ppc64 sh4 x32 ++jtreg_archs += armel mipsel mips64el ++jtreg_archs += riscv64 ++ ++# FIXME: use bootcycle builds for zero archs? ++bootcycle_build = $(if $(filter $(DEB_HOST_ARCH), $(hotspot_archs)),yes) ++#bootcycle_build = $(if $(filter $(DEB_HOST_ARCH), amd64 i386),yes) ++#bootcycle_build = ++zbootcycle_build = ++zbootcycle_build = yes ++ifneq ($(DEB_HOST_ARCH),$(DEB_BUILD_ARCH)) ++ bootcycle_build = ++ zbootcycle_build = ++endif ++ifneq (,$(filter nobootcycle, $(DEB_BUILD_OPTIONS))) ++ bootcycle_build = ++ zbootcycle_build = ++endif ++ ++ifneq (,$(filter $(distrel),lucid precise quantal raring saucy trusty utopic vivid wily lenny etch squeeze wheezy jessie)) ++ is_default = no ++else ++ is_default = yes ++endif ++ ++is_upstream_release = yes ++ ++srcdir = . 
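++# (illustrative sketch of the vafilt helper defined above: given
++#   demo := DEB_HOST_ARCH=amd64 DEB_BUILD_ARCH=amd64
++# $(call vafilt,$(demo),DEB_HOST_ARCH) first filters the list down to
++# "DEB_HOST_ARCH=amd64" and then strips the "DEB_HOST_ARCH=" prefix,
++# leaving "amd64")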
++builddir = build ++zbuilddir = build-zero ++ ++VENDOR = openjdk ++Vendor = OpenJDK ++TOP = usr/lib/jvm ++origin = openjdk ++basename = openjdk-$(shortver) ++ifneq (,$(filter $(DEB_HOST_ARCH), $(hotspot_archs))) ++ priority = $(if $(filter yes, $(is_default)),1711,1709) ++ vm_name = Hotspot JIT ++else ++ priority = $(if $(filter yes, $(is_default)),1704,1703) ++ vm_name = Hotspot Zero ++endif ++jvmver = 1.17.0 ++shortver = 17 ++v_debian := $(shell echo $(PKGVERSION) | sed 's/ea//' | sed 's/\~us[[:digit:]]\+//') ++v_upstream := $(shell echo $(v_debian) | sed 's/-[^-][^-]*$$//') ++v_pkgrel := $(shell echo $(v_debian) | sed 's/^.*-//') ++ ++ifeq (yes,$(is_upstream_release)) ++ v_upbase := $(word 1, $(subst +, , $(v_upstream))) ++ v_upbuild := $(word 2, $(subst +, , $(v_upstream))) ++ ifneq ($(v_debian),$(v_upbase)+$(v_upbuild)-$(v_pkgrel)) ++ $(error wrong version: $(v_upbase)+$(v_upbuild)-$(v_pkgrel) should be: $(v_debian)) ++ endif ++else ++ v_upbase := $(word 1, $(subst ~, , $(v_upstream))) ++ v_upbuild := $(word 2, $(subst ~, , $(v_upstream))) ++ ifneq ($(v_debian),$(v_upbase)~$(v_upbuild)-$(v_pkgrel)) ++ $(error wrong version: $(v_upbase)~$(v_upbuild)-$(v_pkgrel) should be: $(v_debian)) ++ endif ++endif ++ ++ifneq (,$(DEB_HOST_MULTIARCH)) ++ jdirname = java-$(shortver)-$(origin)-$(DEB_HOST_ARCH) ++ jdiralias = java-$(jvmver)-$(origin)-$(DEB_HOST_ARCH) ++ basedir = $(TOP)/$(jdirname) ++ commonbasedir = $(TOP)/java-$(shortver)-$(origin)-common ++ commonbasedir = $(basedir) ++ etcdir = etc/java-$(shortver)-$(origin) ++else ++ jdirname = java-$(shortver)-$(origin) ++ jdiralias = java-$(jvmver)-$(origin) ++ basedir = $(TOP)/$(jdirname) ++ commonbasedir = $(TOP)/java-$(shortver)-$(origin) ++ etcdir = etc/$(jdirname) ++endif ++security = $(etcdir)/security ++ifeq ($(bootcycle_build),yes) ++ sdkimg = bootcycle-build/images/jdk ++ build_target = bootcycle-images test-image ++else ++ sdkimg = images/jdk ++ build_target = images test-image ++endif ++ifeq ($(zbootcycle_build),yes) ++ zsdkimg = bootcycle-build/images/jdk ++ zbuild_target = bootcycle-images test-image ++else ++ zsdkimg = images/jdk ++ zbuild_target = images test-image ++endif ++ ++# Support parallel= in DEB_BUILD_OPTIONS (see #209008) ++COMMA=, ++SPACE = $(EMPTY) $(EMPTY) ++ifneq (,$(filter parallel=%,$(subst $(COMMA), ,$(DEB_BUILD_OPTIONS)))) ++ NJOBS = $(subst parallel=,,$(filter parallel=%,$(subst $(COMMA), ,$(DEB_BUILD_OPTIONS)))) ++endif ++ ++jtreg_pkg = jtreg6 ++testng_pkg= libtestng7-java ++ ++with_check = $(if $(findstring nocheck, $(DEB_BUILD_OPTIONS)),,yes) ++ifneq (,$(filter $(DEB_HOST_ARCH), alpha armel ia64 mipsel mips64el powerpc x32)) ++ with_check = disabled running check on $(DEB_HOST_ARCH) ++endif ++ifeq ($(distribution)-$(distrel),Ubuntu-riscv64) ++ with_check = yes ++endif ++ ++# no jtreg backport yet ++ifneq (,$(filter $(distrel), buster bullseye precise trusty xenial bionic focal groovy hirsute impish)) ++ with_check = disabled for $(distrel), no $(jtreg_pkg) ++endif ++ ++with_docs = $(if $(findstring nodoc, $(DEB_BUILD_OPTIONS)),,yes) ++ifneq (,$(filter $(distrel), precise trusty)) ++ with_docs = ++endif ++#with_check = disabled for this upload ++ ++with_wqy_zenhai = $(if $(filter $(distrel),lenny),,yes) ++ ++arch_map := alpha=alpha arm=arm armel=arm armhf=arm arm64=aarch64 amd64=amd64 hppa=parisc i386=i586 m68k=m68k mips=mips mipsel=mipsel mips64=mips64 mips64el=mips64el powerpc=ppc ppc64=ppc64 ppc64el=ppc64le riscv64=riscv64 sparc=sparc sparc64=sparcv9 sh4=sh s390x=s390x ia64=ia64 x32=x32 ++ ++jvmarch := 
$(strip $(patsubst $(DEB_HOST_ARCH_CPU)=%, %, \ ++ $(filter $(DEB_HOST_ARCH_CPU)=%, $(arch_map)))) ++ifeq ($(DEB_HOST_ARCH),x32) ++ jvmarch := x32 ++endif ++ ++default_vm = $(if $(filter $(DEB_HOST_ARCH), $(hotspot_archs)),hotspot,zero) ++ ++any_archs = alpha amd64 armel armhf arm64 i386 ia64 mips mipsel mips64 mips64el powerpc ppc64 ppc64el m68k riscv64 sh4 sparc sparc64 s390x x32 ++ ++altzero_archs = $(hotspot_archs) ++ifneq (,$(filter $(distrel),trusty)) ++ altzero_archs := $(filter-out arm64, $(altzero_archs)) ++endif ++# FIXME: https://launchpad.net/ubuntu/+source/openjdk-lts/11.0.5+6-1ubuntu1 ++# ftbfs on armhf, also in OpenJDK 15, now fixed in 16? ++#altzero_archs := $(filter-out armhf, $(altzero_archs)) ++ ++ifneq (,$(filter noaltzero, $(DEB_BUILD_OPTIONS))) ++ altzero_archs = ++else ifneq (,$(filter $(DEB_HOST_ARCH), $(altzero_archs))) ++ alternate_vms += zero ++ zero_dir = zero ++endif ++ ++# needed for the jexec build ++export DEBIAN_JDK_BASE_DIR := /$(basedir) ++ ++BOOTJDK_HOME := $(strip $(subst /bin/javac,, $(firstword $(wildcard \ ++ /usr/lib/jvm/java-16-openjdk-$(DEB_BUILD_ARCH)/bin/javac \ ++ /usr/lib/jvm/java-17-openjdk-$(DEB_BUILD_ARCH)/bin/javac \ ++ )))) ++ ++ifeq ($(DEB_BUILD_ARCH),$(DEB_HOST_ARCH)) ++ BUILDJDK_HOME = $(CURDIR)/$(builddir)/$(sdkimg) ++else ++ BUILDJDK_HOME = $(BOOTJDK_HOME) ++endif ++ ++ifneq (,$(filter $(distrel),saucy)) ++ export CC = gcc-4.8 ++ export CXX = g++-4.8 ++ bd_gcc = g++-4.8, ++else ifneq (,$(filter $(distrel),utopic vivid jessie)) ++ export CC = gcc-4.9 ++ export CXX = g++-4.9 ++ bd_gcc = g++-4.9, ++else ifneq (,$(filter $(distrel),wily)) ++ ifneq (,$(filter $(DEB_HOST_ARCH), $(hotspot_archs))) ++ export CC = $(DEB_HOST_GNU_TYPE)-gcc-5 ++ export CXX = $(DEB_HOST_GNU_TYPE)-g++-5 ++ # FIXME: altzero seems to build with GCC 5 on i386, amd64, ppc64el ... 
++ ifneq (,$(filter $(DEB_HOST_ARCH), arm64)) ++ ZERO_CC = $(DEB_HOST_GNU_TYPE)-gcc-4.9 ++ ZERO_CXX = $(DEB_HOST_GNU_TYPE)-g++-4.9 ++ endif ++ else ++ export CC = $(DEB_HOST_GNU_TYPE)-gcc-4.9 ++ export CXX = $(DEB_HOST_GNU_TYPE)-g++-4.9 ++ endif ++ bd_gcc = g++-5 [$(hotspot_archs)], g++-4.9 [$(strip $(foreach a,$(hotspot_archs), !$(a)))], g++-4.9 [arm64], ++else ifneq (,$(filter $(distrel),stretch)) ++ # FIXME: still hotspot core dumps with GCC 6 ++ export CC = $(DEB_HOST_GNU_TYPE)-gcc-6 ++ export CXX = $(DEB_HOST_GNU_TYPE)-g++-6 ++ bd_gcc = g++-6, ++else ifneq (,$(filter $(distrel),artful)) ++ export CC = $(DEB_HOST_GNU_TYPE)-gcc-7 ++ export CXX = $(DEB_HOST_GNU_TYPE)-g++-7 ++ bd_gcc = g++-7, ++else ifneq (,$(filter $(distrel),buster precise trusty xenial bionic cosmic disco)) ++ # FIXME: ftbfs on armhf with the default GCC ++ export CC = $(DEB_HOST_GNU_TYPE)-gcc-8 ++ export CXX = $(DEB_HOST_GNU_TYPE)-g++-8 ++ bd_gcc = g++-8, ++ ifneq (,$(filter $(distrel),buster bionic cosmic disco)) ++ bd_gcc = g++-8 , ++ endif ++else ifneq (,$(filter $(distrel),focal)) ++ export CC = $(DEB_HOST_GNU_TYPE)-gcc-9 ++ export CXX = $(DEB_HOST_GNU_TYPE)-g++-9 ++ bd_gcc = g++-9 , ++else ifneq (,$(filter $(distrel),bullseye groovy hirsute)) ++ export CC = $(DEB_HOST_GNU_TYPE)-gcc-10 ++ export CXX = $(DEB_HOST_GNU_TYPE)-g++-10 ++ bd_gcc = g++-10 , ++else ifneq (,$(filter $(distrel),jammy)) ++ export CC = $(DEB_HOST_GNU_TYPE)-gcc-11 ++ export CXX = $(DEB_HOST_GNU_TYPE)-g++-11 ++ bd_gcc = g++-11 , ++else ++ export CC = $(DEB_HOST_GNU_TYPE)-gcc-12 ++ export CXX = $(DEB_HOST_GNU_TYPE)-g++-12 ++ bd_gcc = g++-12 , ++endif ++ ++# GCC 11 and up ++ifeq (,$(findstring 11 12, $(bd_gcc))) ++ DEB_BUILD_MAINT_OPTIONS = optimize=-lto ++endif ++ ++# for Hotspot: hotspot/make/linux/makefiles/vm.make ++# Don't overwrite Hotspot opt level : ++# -O3 is already default option for hotspot ++ ++# FIXME: ftbfs with -Werror=format-security ++dpkg_buildflags_hs = \ ++ DEB_BUILD_MAINT_OPTIONS="$(DEB_BUILD_MAINT_OPTIONS)" \ ++ DEB_CFLAGS_MAINT_STRIP="-O2 -Werror=format-security" \ ++ DEB_CXXFLAGS_MAINT_STRIP="-O2 -Werror=format-security" \ ++ DEB_CFLAGS_MAINT_APPEND="-fno-stack-protector -Wno-deprecated-declarations" \ ++ DEB_CXXFLAGS_MAINT_APPEND="-fno-stack-protector -Wno-deprecated-declarations$(if $(filter $(DEB_HOST_ARCH), armel armhf), -fpermissive)" ++# jdk/make/CompileDemos.gmk (SetupJVMTIDemo) doesn't like commas in flags ++dpkg_buildflags_hs += \ ++ DEB_LDFLAGS_MAINT_STRIP="-Wl,-z,relro -Wl,-Bsymbolic-functions" \ ++ DEB_LDFLAGS_MAINT_APPEND="-Xlinker -z -Xlinker relro -Xlinker -Bsymbolic-functions -Xlinker --no-as-needed" ++dpkg_buildflags_hs += dpkg-buildflags ++export EXTRA_CPPFLAGS_HS := $(shell $(dpkg_buildflags_hs) --get CPPFLAGS) ++export EXTRA_CFLAGS_HS := $(shell $(dpkg_buildflags_hs) --get CFLAGS; $(dpkg_buildflags_hs) --get CPPFLAGS) ++export EXTRA_CXXFLAGS_HS := $(shell $(dpkg_buildflags_hs) --get CXXFLAGS) -Wno-deprecated-declarations ++export EXTRA_LDFLAGS_HS := $(shell $(dpkg_buildflags_hs) --get LDFLAGS) ++ ++dpkg_buildflags_zero = \ ++ DEB_BUILD_MAINT_OPTIONS="$(DEB_BUILD_MAINT_OPTIONS)" \ ++ DEB_CFLAGS_MAINT_STRIP="-O2 -Werror=format-security -fstack-protector -fstack-protector-strong" \ ++ DEB_CXXFLAGS_MAINT_STRIP="-O2 -Werror=format-security -fstack-protector -fstack-protector-strong" \ ++ DEB_CFLAGS_MAINT_APPEND="-fno-stack-protector -Wno-deprecated-declarations" \ ++ DEB_CXXFLAGS_MAINT_APPEND="-fno-stack-protector -Wno-deprecated-declarations" ++# jdk/make/CompileDemos.gmk (SetupJVMTIDemo) doesn't like 
commas in flags ++dpkg_buildflags_zero += \ ++ DEB_LDFLAGS_MAINT_STRIP="-Wl,-z,relro -Wl,-Bsymbolic-functions" \ ++ DEB_LDFLAGS_MAINT_APPEND="-Xlinker -z -Xlinker relro -Xlinker -Bsymbolic-functions -Xlinker --no-as-needed" ++dpkg_buildflags_zero += dpkg-buildflags ++export EXTRA_CPPFLAGS_ZERO := $(shell $(dpkg_buildflags_zero) --get CPPFLAGS) ++export EXTRA_CFLAGS_ZERO := $(shell $(dpkg_buildflags_zero) --get CFLAGS; $(dpkg_buildflags_zero) --get CPPFLAGS) ++export EXTRA_CXXFLAGS_ZERO := $(shell $(dpkg_buildflags_zero) --get CXXFLAGS) ++export EXTRA_LDFLAGS_ZERO := $(shell $(dpkg_buildflags_zero) --get LDFLAGS) ++ ++ifeq ($(DEB_HOST_ARCH),alpha) ++ export EXTRA_LDFLAGS_ZERO += -Wl,--no-relax ++endif ++ ++ifneq (,$(filter $(distrel),squeeze lucid)) ++ with_bridge = bridge ++ with_jni_bridge = yes ++else ++ with_bridge = atk ++endif ++ ++with_nss = yes ++ ++on_buildd := $(shell [ -f /CurrentlyBuilding -o "$$LOGNAME" = buildd ] && echo yes) ++ ++CONFIGURE_ARGS = ++ ++# there are problems --with-alt-jar=/usr/bin/fastjar, ++# see http://icedtea.classpath.org/bugzilla/show_bug.cgi?id=217 ++# only use it for zero ports except powerpc (which we assume as ++# fast enough to build. ++ifeq (,$(filter $(DEB_HOST_ARCH), $(hotspot_archs) ia64 powerpc)) ++ CONFIGURE_ARGS += --with-alt-jar=/usr/bin/fastjar ++endif ++ ++ifeq (,$(filter $(DEB_HOST_ARCH), $(hotspot_archs))) ++ CONFIGURE_ARGS += --enable-zero ++endif ++ ++ifeq (,$(filter $(DEB_BUILD_OPTIONS), terse)) ++ #EXTRA_BUILD_ENV += MAKE_VERBOSE=y QUIETLY= LOG=debug ++ EXTRA_BUILD_ENV += MAKE_VERBOSE=y QUIETLY= LOG=info,cmdlines ++endif ++ ++# timestamp skew on kFreeBSD ++EXTRA_BUILD_ENV += IGNORE_OLD_CONFIG=true ++ ++EXTRA_BUILD_ENV += LIBFFI_LIBS=-lffi_pic ++ ++EXTRA_BUILD_ENV += JOBS=$(NJOBS) ++ ++ifeq ($(derivative),Ubuntu) ++ ifeq ($(DEB_HOST_ARCH),s390x) ++ ifneq (,$(filter $(distrel), trusty xenial bionic eoan focal)) ++ export VENDOR_FLAGS_CPU = -march=zEC12 ++ else ++ export VENDOR_FLAGS_CPU = -march=z13 -mtune=z15 ++ endif ++ endif ++endif ++ ++CONFIGURE_ARGS += --enable-system-kerberos ++ ++ifeq ($(with_nss),yes) ++ CONFIGURE_ARGS += --enable-nss ++else ifeq ($(with_nss),no) ++ CONFIGURE_ARGS += --disable-nss ++endif ++ ++ifeq ($(with_systemtap),yes) ++ CONFIGURE_ARGS += --enable-systemtap --with-abs-install-dir=/$(basedir) ++endif ++ ++COMMON_CONFIGURE_ARGS = ++DEFAULT_CONFIGURE_ARGS = ++ZERO_CONFIGURE_ARGS = ++ ++COMMON_CONFIGURE_ARGS += --with-jni-libpath=/usr/lib/$(DEB_HOST_MULTIARCH)/jni:/lib/$(DEB_HOST_MULTIARCH):/usr/lib/$(DEB_HOST_MULTIARCH):/usr/lib/jni:/lib:/usr/lib ++ ++# FIXME: --with-jvm-variants=server,zero not supported upstream ++ifneq (,$(filter $(DEB_HOST_ARCH),$(hotspot_archs))) ++ ifneq (,$(filter $(DEB_HOST_ARCH),i386)) ++ DEFAULT_CONFIGURE_ARGS += --with-jvm-variants=client,server ++ else ++ DEFAULT_CONFIGURE_ARGS += --with-jvm-variants=server ++ endif ++else ++ DEFAULT_CONFIGURE_ARGS += --host=$(DEB_HOST_GNU_TYPE) --build=$(DEB_BUILD_GNU_TYPE) ++ DEFAULT_CONFIGURE_ARGS += --with-jvm-variants=zero ++endif ++ZERO_CONFIGURE_ARGS += --with-jvm-variants=zero ++ ++DEFAULT_CONFIGURE_ARGS += --with-boot-jdk=$(BOOTJDK_HOME) ++ZERO_CONFIGURE_ARGS += --with-boot-jdk=$(BUILDJDK_HOME) ++ ++COMMON_CONFIGURE_ARGS += --disable-ccache ++ ++# FIXME: zero not ready for --disable-precompiled-headers ++#COMMON_CONFIGURE_ARGS += --disable-precompiled-headers ++ifneq (,$(filter $(DEB_HOST_ARCH),$(hotspot_archs))) ++ DEFAULT_CONFIGURE_ARGS += --disable-precompiled-headers ++endif ++ ++ifneq (,$(filter $(DEB_HOST_ARCH), powerpc)) ++ 
DEFAULT_CONFIGURE_ARGS += --with-boot-jdk-jvmargs="-XX:ThreadStackSize=2240" ++endif ++ ++ifneq (,$(filter $(DEB_HOST_ARCH), ppc64 ppc64el)) ++ ZERO_CONFIGURE_ARGS += --with-boot-jdk-jvmargs="-XX:ThreadStackSize=2240" ++endif ++ ++ifeq ($(with_check),yes) ++ COMMON_CONFIGURE_ARGS += --with-jtreg=/usr/share/jtreg ++else ++ COMMON_CONFIGURE_ARGS += --disable-jtreg-failure-handler ++endif ++ ++COMMON_CONFIGURE_ARGS += --disable-warnings-as-errors ++ ++ifneq (,$(filter $(DEB_HOST_ARCH),$(hotspot_archs))) ++ ifneq (,$(filter $(DEB_HOST_ARCH), amd64)) ++ jvm_features = zgc ++ endif ++ # not needed explicitly? ++ #jvm_features += shenandoahgc ++ DEFAULT_CONFIGURE_ARGS += --with-jvm-features=$(subst $(SPACE),$(COMMA),$(strip $(jvm_features))) ++endif ++ ++vendor := $(shell dpkg-vendor --query Vendor) ++ifeq ($(vendor),Debian) ++ vendor_url = https://tracker.debian.org/pkg/$(basename) ++ vendor_bugs = https://bugs.debian.org/src:$(basename) ++else ifeq ($(vendor),Ubuntu) ++ vendor_url = https://launchpad.net/ubuntu/+source/$(basename) ++ vendor_bugs = https://bugs.launchpad.net/ubuntu/+source/$(basename) ++else ++ vendor_url = $(shell dpkg-vendor --query Vendor-URL) ++ vendor_bugs = $(shell dpkg-vendor --query Bugs) ++endif ++ ++ifneq (,$(vendor)) ++ COMMON_CONFIGURE_ARGS += \ ++ --with-vendor-name='$(vendor)' \ ++ --with-vendor-url='$(vendor_url)' \ ++ --with-vendor-bug-url='$(vendor_bugs)' \ ++ --with-vendor-vm-bug-url='$(vendor_bugs)' ++endif ++ ++# not sure if this is a good idea, might change parsing of java -version ++#COMMON_CONFIGURE_ARGS += \ ++# --with-vendor-version-string='$(PKGVERSION)' ++ ++ifeq ($(distribution),Debian) ++ COMMON_CONFIGURE_ARGS += \ ++ --with-vendor-name='$(distribution)' \ ++ --with-vendor-url='https://tracker.debian.org/$(basename)' \ ++ --with-vendor-bug-url='https://bugs.debian.org/$(basename)' \ ++ --with-vendor-vm-bug-url='https://bugs.debian.org/$(basename)' ++else ifeq ($(distribution),Ubuntu) ++ ppa_build := $(shell if echo 'v_pkgrel' | egrep '^[0-9]+(ubuntu[0-9]+)?(~[0-9.]+)?$$'; then echo no; else echo yes; fi) ++ ifeq ($(ppa_build),yes) ++ COMMON_CONFIGURE_ARGS += \ ++ --with-vendor-name='Private Build' \ ++ --with-vendor-url='Unknown' \ ++ --with-vendor-bug-url='Unknown' \ ++ --with-vendor-vm-bug-url='Unknown' ++ else ++ COMMON_CONFIGURE_ARGS += \ ++ --with-vendor-name='$(distribution)' \ ++ --with-vendor-url='https://ubuntu.com/' \ ++ --with-vendor-bug-url='https://bugs.launchpad.net/ubuntu/+source/$(basename)' \ ++ --with-vendor-vm-bug-url='https://bugs.launchpad.net/ubuntu/+source/$(basename)' ++ endif ++else ++ COMMON_CONFIGURE_ARGS += \ ++ --with-vendor-name='$(distribution)' \ ++ --with-vendor-url='Unknown' \ ++ --with-vendor-bug-url='Unknown' \ ++ --with-vendor-vm-bug-url='Unknown' ++endif ++ ++COMMON_CONFIGURE_ARGS += \ ++ --with-version-pre='$(if $(filter yes, $(is_upstream_release)),,ea)' \ ++ --with-version-build='$(v_upbuild)' \ ++ --with-version-opt='$(distribution)-$(v_pkgrel)' \ ++ --with-copyright-year=$(PKGYEAR) \ ++ ++COMMON_CONFIGURE_ARGS += \ ++ --with-debug-level=release \ ++ --with-native-debug-symbols=external \ ++ --enable-unlimited-crypto \ ++ --with-zlib=system \ ++ --with-giflib=system \ ++ --with-libpng=system \ ++ --with-libjpeg=system \ ++ --with-lcms=system \ ++ --with-pcsclite=system \ ++ --disable-warnings-as-errors \ ++ --disable-javac-server \ ++ ++ifeq ($(with_check),yes) ++# temporary: disable gtest until compile errors are fixed in jammy ++ ifeq (,$(filter $(distrel),bullseye jammy)) ++ COMMON_CONFIGURE_ARGS += \ ++ 
--with-gtest=system ++ endif ++endif ++ ++ifeq (,$(filter $(distrel),jessie stretch precise trusty xenial)) ++ COMMON_CONFIGURE_ARGS += \ ++ --with-harfbuzz=system ++ with_system_hb = yes ++endif ++ ++ifneq (,$(filter $(distrel),precise trusty xenial)) ++ # building with a GCC from a PPA ... ++ COMMON_CONFIGURE_ARGS += \ ++ --with-stdc++lib=static ++else ifneq (,$(filter $(DEB_HOST_ARCH), ia64)) ++ # seen in 2023 on ia64: ++ # /usr/bin/ld: jvm-test-launcher: no symbol version section for versioned symbol `__cxa_pure_virtual@CXXABI_1.3' ++ # works when linking statically ++ COMMON_CONFIGURE_ARGS += \ ++ --with-stdc++lib=static ++else ++ COMMON_CONFIGURE_ARGS += \ ++ --with-stdc++lib=dynamic ++endif ++ ++ifneq (,$(NJOBS)) ++ COMMON_CONFIGURE_ARGS += --with-num-cores=$(NJOBS) ++endif ++ ++# FIXME: upstream says the extra flags are not designed for general purpose ... ++# http://mail.openjdk.java.net/pipermail/jdk8u-dev/2014-July/001389.html ++ ++# FIXME: demos fail to build with these ... ++# FIXME: zero ftbfs without -fpermissive ++#src/cpu/zero/vm/cppInterpreter_zero.cpp:732:30: error: invalid conversion from 'intptr_t {aka long int}' to 'const void*' [-fpermissive] ++# object = (oop) locals[0]; ++# ^ ++ ++#COMMON_CONFIGURE_ARGS += \ ++ --with-extra-cflags='$(EXTRA_CPPFLAGS_HS) $(EXTRA_CFLAGS_HS)' \ ++ --with-extra-cxxflags='$(EXTRA_CPPFLAGS_HS) $(EXTRA_CXXFLAGS_HS)' \ ++ --with-extra-ldflags='$(EXTRA_LDFLAGS_HS)' ++ ++ifneq (,$(filter $(DEB_HOST_ARCH), $(hotspot_archs))) ++ DEFAULT_CONFIGURE_ARGS += \ ++ --with-extra-cflags='$(EXTRA_CPPFLAGS_HS) $(EXTRA_CFLAGS_HS)' \ ++ --with-extra-cxxflags='$(EXTRA_CPPFLAGS_HS) $(EXTRA_CXXFLAGS_HS)' \ ++ --with-extra-ldflags='$(EXTRA_LDFLAGS_HS)' ++else ++ DEFAULT_CONFIGURE_ARGS += \ ++ --with-extra-cflags='$(EXTRA_CPPFLAGS_ZERO) $(EXTRA_CFLAGS_ZERO)' \ ++ --with-extra-cxxflags='$(EXTRA_CPPFLAGS_ZERO) $(EXTRA_CXXFLAGS_ZERO)' \ ++ --with-extra-ldflags='$(EXTRA_LDFLAGS_ZERO)' ++endif ++ ++ZERO_CONFIGURE_ARGS += \ ++ --with-extra-cflags='$(EXTRA_CPPFLAGS_ZERO) $(EXTRA_CFLAGS_ZERO)' \ ++ --with-extra-cxxflags='$(EXTRA_CPPFLAGS_ZERO) $(EXTRA_CXXFLAGS_ZERO)' \ ++ --with-extra-ldflags='$(EXTRA_LDFLAGS_ZERO)' ++ ++ifneq ($(DEB_HOST_ARCH),$(DEB_BUILD_ARCH)) ++ COMMON_CONFIGURE_ARGS += \ ++ --build=$(DEB_BUILD_GNU_TYPE) \ ++ --host=$(DEB_HOST_GNU_TYPE) \ ++ --target=$(DEB_HOST_GNU_TYPE) \ ++ BUILD_CC=$(subst $(DEB_HOST_GNU_TYPE),$(DEB_BUILD_GNU_TYPE),$(CC)) \ ++ BUILD_LD=$(subst $(DEB_HOST_GNU_TYPE),$(DEB_BUILD_GNU_TYPE),$(CC)) \ ++ BUILD_CXX=$(subst $(DEB_HOST_GNU_TYPE),$(DEB_BUILD_GNU_TYPE),$(CXX)) \ ++ ++endif ++ ++# (most) jre and jdk tools handled by the alternatives system. 
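++# (registration is driven by the .jinfo file written in the install target
++# below; java-common's update-java-alternatives reads it and ends up calling,
++# per tool, roughly: update-alternatives --install /usr/bin/java java
++# /$(basedir)/bin/java $(priority))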
++# dups in heimdal-clients, krb5-user: kinit, klist, ktab; no alternatives
++# jrunscript goes in the jdk packages
++
++all_tools = jar jarsigner java javac javadoc javap jcmd \
++ jconsole jdb jdeprscan jdeps jfr jimage jinfo jlink jmap \
++ jmod jpackage jps jrunscript jshell jstack jstat jstatd keytool \
++ rmiregistry serialver
++
++jdk_hl_tools = jar jarsigner javac javadoc javap jcmd jdb \
++ jdeprscan jdeps jfr jimage jinfo jlink jmap jmod jps \
++ jrunscript jshell jstack jstat jstatd serialver
++ifeq (,$(filter $(DEB_HOST_ARCH), alpha armel ia64 m68k mips mipsel mips64el powerpc s390x sh4 sparc64 x32))
++ all_tools += jhsdb
++ jdk_hl_tools += jhsdb
++endif
++jdk_tools = jconsole
++
++all_jre_tools = $(filter-out $(jdk_hl_tools) $(jdk_tools), $(all_tools))
++# no tools in $(p_jre)
++jre_tools =
++jre_hl_tools := $(filter-out $(jre_tools), $(all_jre_tools))
++
++# build dependencies
++ifneq (,$(filter $(distrel),precise))
++ bd_autotools = autoconf2.69, automake, autotools-dev,
++else
++ bd_autotools = autoconf, automake, autotools-dev,
++endif
++
++ifneq (,$(filter $(distrel),precise))
++ bd_bootstrap = openjdk-16-jdk-headless | openjdk-17-jdk-headless,
++else ifneq (,$(filter $(distrel),squeeze wheezy jessie trusty xenial))
++ bd_bootstrap = openjdk-16-jdk-headless:native | openjdk-17-jdk-headless:native,
++else
++ #bd_bootstrap = openjdk-16-jdk-headless:native | openjdk-17-jdk-headless:native,
++ bd_bootstrap = openjdk-17-jdk-headless:native | openjdk-16-jdk-headless:native,
++endif
++
++ifeq (,$(filter $(distrel),squeeze wheezy jessie precise trusty xenial))
++ bd_cross += \
++ $(basename)-jdk-headless ,
++else
++ bd_cross += \
++ file,
++endif
++
++ifneq (,$(filter $(distrel),squeeze wheezy jessie precise trusty))
++ old_debhelper = veryold
++ bd_debhelper = debhelper (>= 9),
++else ifneq (,$(filter $(distrel), stretch xenial))
++ old_debhelper = old
++ bd_debhelper = debhelper (>= 9.20141010), dpkg-dev (>= 1.17.14),
++else
++ bd_debhelper = debhelper (>= 11),
++endif
++
++ifneq (,$(filter $(distrel),precise))
++ bd_syslibs = zlib1g-dev, libattr1-dev,
++else
++ bd_syslibs = zlib1g-dev:native, zlib1g-dev, libattr1-dev,
++endif
++ifneq (,$(filter $(distrel),squeeze lucid))
++ bd_syslibs += libpng12-dev, libjpeg62-dev,
++else ifneq (,$(filter $(distrel),wheezy precise quantal raring saucy))
++ bd_syslibs += libpng-dev, libjpeg8-dev,
++else
++ bd_syslibs += libpng-dev, libjpeg-dev,
++endif
++bd_syslibs += \
++ libgif-dev,
++bd_openjdk = \
++ libxtst-dev, libxi-dev, libxt-dev, libxaw7-dev, libxrender-dev, libcups2-dev, \
++ libasound2-dev, liblcms2-dev, libfreetype6-dev (>= 2.2.1), \
++ libxinerama-dev, libkrb5-dev, xsltproc, libpcsclite-dev, libxrandr-dev, \
++ libelf-dev, libfontconfig1-dev, libgtk2.0-0 | libgtk-3-0,
++ifeq ($(with_system_hb),yes)
++ bd_openjdk += libharfbuzz-dev,
++endif
++
++ifeq (,$(filter $(distrel),precise))
++ bd_zero = libffi-dev, libffi-dev:native,
++else
++ bd_zero = libffi-dev,
++endif
++
++bd_ant = ant, ant-optional,
++
++bd_fastjar = fastjar (>= 2:0.96-0ubuntu2),
++ifeq ($(with_nss),yes)
++ ifneq (,$(filter $(distrel),squeeze lucid precise quantal))
++ bd_nss = libnss3-dev (>= 3.12.3),
++ else ifneq (,$(filter $(distrel),wheezy raring saucy trusty))
++ bd_nss = libnss3-dev (>= 2:3.12.3),
++ else
++ bd_nss = libnss3-dev (>= 2:3.17.1),
++ endif
++endif
++ifeq ($(with_systemtap),yes)
++ bd_systemtap = systemtap-sdt-dev [!sh4],
++endif
++
++ifeq (,$(filter $(distrel),jessie stretch buster precise trusty xenial bionic focal groovy))
++ with_debugedit = yes
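++ # debugedit is invoked in the install target with
++ # --build-id-seed='$(build_id_seed)' to rewrite the ELF build ids, which
++ # is intended to keep the .build-id paths in the -dbg package stable for
++ # a given source/version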
++ bd_debugedit = debugedit (>= 4.16),
++ build_id_seed = $(PKGSOURCE)-$(PKGVERSION)
++endif
++
++ifeq (,$(filter $(distrel),jessie stretch buster precise trusty xenial bionic))
++ bd_strip_ndt = strip-nondeterminism,
++endif
++
++ifeq (,$(filter $(distrel),precise))
++ nocheck_profile = $(EMPTY)
++endif
++
++# look at all TEST.ROOT for the required jtreg version, sorted by newest
++min_jtreg_version := $(shell find test/ -name TEST.ROOT -exec grep -hr "^requiredVersion=" {} \; | sed -e 's:^requiredVersion=::' -e 's:$$:-0~:' | sort -uVr | head -n1 | tr ' ' '-')
++
++ifeq ($(with_check),yes)
++ bd_check = \
++ $(jtreg_pkg) (>= $(min_jtreg_version))$(nocheck_profile), $(testng_pkg)$(nocheck_profile), \
++ xvfb$(nocheck_profile), xauth$(nocheck_profile), \
++ xfonts-base$(nocheck_profile), libgl1-mesa-dri [!x32]$(nocheck_profile), \
++ xfwm4$(nocheck_profile), x11-xkb-utils$(nocheck_profile), \
++ dbus-x11$(nocheck_profile), googletest$(nocheck_profile), \
++ google-mock$(nocheck_profile),
++ifeq (,$(filter $(distrel),jessie stretch buster))
++ bd_check += libasmtools-java$(nocheck_profile),
++endif
++else
++ bd_check = file,
++endif
++
++dlopen_hl_depends = \
++ libcups2, liblcms2-2,
++ifeq ($(derivative),Ubuntu)
++ ifneq (,$(filter $(distrel),lucid))
++ dlopen_hl_depends += libjpeg62,
++ else
++ dlopen_hl_depends += libjpeg8,
++ endif
++else
++ ifneq (,$(filter $(distrel),squeeze))
++ dlopen_hl_depends += libjpeg62,
++ else
++ dlopen_hl_depends += libjpeg62-turbo,
++ endif
++endif
++dlopen_hl_depends += libfontconfig1,
++ifneq ($(with_nss),no)
++ ifneq (,$(filter $(distrel),squeeze lucid precise))
++ dlopen_hl_depends += , libnss3-1d (>= 3.12.3)
++ else ifneq (,$(filter $(distrel),wheezy quantal raring saucy trusty))
++ dlopen_hl_depends += , libnss3 (>= 2:3.12.3)
++ else
++ dlopen_hl_depends += , libnss3 (>= 2:3.17.1)
++ endif
++endif
++dlopen_hl_recommends =
++dlopen_jre_depends = \
++ libglib2.0-0 (>= 2.24), libgtk2.0-0 | libgtk-3-0, libxrandr2, libxinerama1,
++ifneq (,$(filter $(distrel),squeeze wheezy jessie stretch lucid precise xenial))
++ dlopen_jre_depends += libgl1-mesa-glx | libgl1,
++else
++ dlopen_jre_depends += libgl1,
++endif
++ifeq ($(with_system_hb),yes)
++ dlopen_jre_depends += libharfbuzz0b,
++endif
++dlopen_jre_recommends =
++
++# .desktop files need to be multiarch installable
++#java_launcher = /$(basedir)/bin/java
++java_launcher = /usr/bin/java
++ifeq ($(derivative),Ubuntu)
++ #java_launcher = cautious-launcher %f /$(basedir)/bin/java
++ java_launcher = cautious-launcher %f /usr/bin/java
++endif
++
++ifneq (,$(filter $(distrel),squeeze wheezy lucid precise quantal raring))
++ core_fonts = ttf-dejavu-extra
++else
++ core_fonts = fonts-dejavu-extra
++endif
++ifneq (,$(filter $(distrel),squeeze lucid))
++ cjk_fonts = ttf-baekmuk | ttf-unfonts | ttf-unfonts-core,
++ cjk_fonts += ttf-sazanami-gothic | ttf-kochi-gothic,
++ cjk_fonts += ttf-sazanami-mincho | ttf-kochi-mincho,
++else
++ cjk_fonts = fonts-nanum,
++ cjk_fonts += fonts-ipafont-gothic, fonts-ipafont-mincho,
++endif
++cjk_fonts += $(if $(with_wqy_zenhai),fonts-wqy-microhei | fonts-wqy-zenhei,fonts-arphic-uming),
++ifneq (,$(filter $(distrel),squeeze wheezy))
++ cjk_fonts += ttf-indic-fonts,
++else ifneq (,$(filter $(distrel), lucid maverick natty oneiric precise quantal raring saucy trusty))
++ cjk_fonts += ttf-indic-fonts-core, ttf-telugu-fonts, ttf-oriya-fonts,
++ cjk_fonts += ttf-kannada-fonts, ttf-bengali-fonts,
++else
++ cjk_fonts += fonts-indic,
++endif
++
++jrehl_breaks = clojure1.8
(<= 1.8.0-7ubuntu1~), ++ifneq (,$(filter $(distrel),buster sid bionic cosmic disco eoan)) ++ jrehl_breaks += \ ++ jetty9 (<< 9.4.15-1~), netbeans (<< 10.0-3~), visualvm (<< 1.4.2-2~), \ ++ eclipse-platform (<< 3.8.1-11), eclipse-rcp (<< 3.8.1-11), \ ++ libequinox-osgi-java (<< 3.9.1), ++endif ++ifneq (,$(filter $(distrel),bionic cosmic disco eoan)) ++ jrehl_breaks += \ ++ tomcat8 (<< 8.5.39-1ubuntu1~), ++endif ++ifneq (,$(filter $(distrel),buster sid)) ++ jrehl_breaks += \ ++ libreoffice-core (<< 1:6.1.5-2~), ++else ifneq (,$(filter $(distrel),disco)) ++ jrehl_breaks += \ ++ libreoffice-core (<< 1:6.2.2-0ubuntu2), ++endif ++ ++p_jre = $(basename)-jre ++p_jrehl = $(basename)-jre-headless ++p_jrez = $(basename)-jre-zero ++p_jdkhl = $(basename)-jdk-headless ++p_jdk = $(basename)-jdk ++p_demo = $(basename)-demo ++p_src = $(basename)-source ++p_doc = $(basename)-doc ++p_dbg = $(basename)-dbg ++ ++d = debian/tmp ++d_jre = debian/$(p_jre) ++d_jrehl = debian/$(p_jrehl) ++d_jrez = debian/$(p_jrez) ++d_jdkhl = debian/$(p_jdkhl) ++d_jdk = debian/$(p_jdk) ++d_demo = debian/$(p_demo) ++d_src = debian/$(p_src) ++d_doc = debian/$(p_doc) ++d_dbg = debian/$(p_dbg) ++ ++control_vars = \ ++ '-Vvm:Name=$(vm_name)' \ ++ '-Vvm:Version=$(shortver)' \ ++ '-Vdlopenhl:Depends=$(dlopen_hl_depends)' \ ++ '-Vdlopenhl:Recommends=$(dlopen_hl_recommends)' \ ++ '-Vdlopenjre:Depends=$(dlopen_jre_depends)' \ ++ '-Vdlopenjre:Recommends=$(dlopen_jre_recommends)' \ ++ ++ifeq ($(with_bridge),atk) ++ control_vars += '-Vbridge:Recommends=libatk-wrapper-java-jni (>= 0.30.4-0ubuntu2)' ++else ifeq ($(with_bridge),yes) ++ ifneq (,$(DEB_HOST_MULTIARCH)) ++ control_vars += '-Vbridge:Recommends=libaccess-bridge-java-jni (>= 1.26.2-6)' ++ else ifeq ($(with_jni_bridge),yes) ++ control_vars += '-Vbridge:Recommends=libaccess-bridge-java-jni' ++ else ++ control_vars += '-Vbridge:Recommends=libaccess-bridge-java' ++ endif ++endif ++ ++ifeq (,$(filter $(DEB_HOST_ARCH), $(hotspot_archs))) ++ control_vars += '-Vdefaultvm:Provides=$(p_jre)-zero' ++endif ++ ++pkg_certs = ca-certificates-java ++ifneq (,$(filter $(distrel), squeeze wheezy jessie stretch precise trusty xenial bionic cosmic disco)) ++ control_vars += '-Vcacert:Depends=$(pkg_certs)' ++else ++ control_vars += '-Vcacert:Depends=$(pkg_certs) (>= 20190405~)' ++endif ++ ++ifneq (,$(filter $(distrel), squeeze wheezy jessie lucid precise quantal raring saucy trusty utopic vivid)) ++ control_vars += '-Vmountpoint:Depends=initscripts' ++else ++ control_vars += '-Vmountpoint:Depends=util-linux (>= 2.26.2-4)' ++endif ++ ++pkg_jcommon = java-common (>= 0.28) ++control_vars += '-Vjcommon:Depends=$(pkg_jcommon)' ++ ++control_vars += '-Vjrehl:Breaks=$(jrehl_breaks)' ++ ++ifneq ($(with_debugedit),yes) ++ control_vars += '-Vdbg:Conflicts=openjdk-11-dbg, openjdk-12-dbg, openjdk-13-dbg, openjdk-14-dbg, openjdk-15-dbg, openjdk-16-dbg' ++endif ++ ++debian/control: debian/control.in debian/tests/control debian/rules ++ @cp -p debian/control debian/control.old ++ sed \ ++ -e 's/@basename@/$(basename)/g' \ ++ -e 's/@bd_autotools@/$(bd_autotools)/g' \ ++ -e 's/@bd_bootstrap@/$(strip $(bd_bootstrap))/g' \ ++ -e 's/@bd_cross@/$(strip $(bd_cross))/g' \ ++ -e 's/@bd_debhelper@/$(strip $(bd_debhelper))/g' \ ++ -e 's/@bd_openjdk@/$(bd_openjdk)/g' \ ++ -e 's/@bd_zero@/$(bd_zero)/g' \ ++ -e 's/@bd_gcc@/$(bd_gcc)/g' \ ++ -e 's/@bd_syslibs@/$(bd_syslibs)/g' \ ++ -e 's/@bd_fastjar@/$(bd_fastjar)/g' \ ++ -e 's/@bd_nss@/$(bd_nss)/g' \ ++ -e 's/@bd_systemtap@/$(bd_systemtap)/g' \ ++ -e 's/@bd_ant@/$(bd_ant)/g' \ ++ -e 
's/@bd_debugedit@/$(bd_debugedit)/g' \ ++ -e 's/@bd_strip_ndt@/$(bd_strip_ndt)/g' \ ++ -e 's/@bd_check@/$(bd_check)/g' \ ++ -e 's/@core_fonts@/$(core_fonts)/g' \ ++ -e 's/@cjk_fonts@/$(cjk_fonts)/g' \ ++ -e 's/@any_archs@/$(any_archs)/g' \ ++ -e 's/@hotspot_archs@/$(hotspot_archs)/g' \ ++ -e 's/@altzero_archs@/$(altzero_archs)/g' \ ++ debian/control.in \ ++ $(if $(altzero_archs), debian/control.zero-jre) \ ++ $(if $(DEB_HOST_MULTIARCH),,| grep -v '^Multi-Arch') \ ++ > debian/control ++ @if cmp -s debian/control debian/control.old; then \ ++ rm -f debian/control.old; \ ++ else \ ++ diff -u debian/control.old debian/control | wdiff -d -; \ ++ echo "debian/control did change, please restart the build"; \ ++ rm -f debian/control.old; \ ++ exit 1; \ ++ fi ++ ++debian/tests/control: debian/tests/control.in debian/rules ++ sed \ ++ -e 's/@jtreg_pkg@/$(jtreg_pkg)/g' \ ++ -e 's/@min_jtreg_version@/$(min_jtreg_version)/g' \ ++ $< > $@; ++ ++debian/tests/%.sh: debian/tests/%.in debian/rules ++ sed \ ++ -e 's,@JDK_DIR@,/$(basedir),g' \ ++ -e 's,@jtreg_pkg@,$(jtreg_pkg),g' \ ++ -e 's/@jtreg_archs@/$(jtreg_archs)/g' \ ++ -e 's:@doc_dir@:/usr/share/doc/$(p_jrehl)/:g' \ ++ $< > $@; ++ chmod +x $@ ++ ++gen-autopkgtests: debian/tests/jtdiff-autopkgtest.sh debian/tests/jtreg-autopkgtest.sh ++ ++packaging-files: ++ for f in debian/*.in; do \ ++ case "$$f" in debian/control.in) continue; esac; \ ++ f2=$$(echo $$f | sed 's/JB/$(basename)/;s/\.in$$//'); \ ++ sed -e 's/@JRE@/$(p_jre)/g' \ ++ -e 's/@JDK@/$(p_jdk)/g' \ ++ -e 's/@vendor@/$(Vendor)/g' \ ++ -e 's/@RELEASE@/$(shortver)/g' \ ++ -e 's/@basename@/$(basename)/g' \ ++ -e 's,@TOP@,$(TOP),g' \ ++ -e 's,@basedir@,$(basedir),g' \ ++ -e 's,@etcdir@,$(etcdir),g' \ ++ -e 's,@jdiralias@,$(jdiralias),g' \ ++ -e 's,@jdirname@,$(jdirname),g' \ ++ -e 's/@srcname@/$(srcname)/g' \ ++ -e 's/@jvmarch@/$(jvmarch)/g' \ ++ -e 's/@tag@/$(tag)/g' \ ++ -e 's/@priority@/$(priority)/g' \ ++ -e 's/@mantag@/$(mantag)/g' \ ++ -e 's/@multiarch@/$(DEB_HOST_MULTIARCH)/g' \ ++ -e 's/@jre_hl_tools@/$(jre_hl_tools)/g' \ ++ -e 's/@jre_tools@/$(jre_tools)/g' \ ++ -e 's/@jdk_hl_tools@/$(jdk_hl_tools)/g' \ ++ -e 's/@jdk_tools@/$(jdk_tools)/g' \ ++ -e 's/@corba_tools@/$(corba_tools)/g' \ ++ -e 's,@j2se_lib@,$(j2se_lib),g' \ ++ -e 's,@j2se_share@,$(j2se_share),g' \ ++ -e 's,@java_launcher@,$(java_launcher),g' \ ++ -e 's/@cjk_fonts@/$(cjk_fonts)/g' \ ++ $$f > $$f2; \ ++ done ++ ++configure: stamps/configure ++stamps/configure: stamps/unpack ++ @echo '========== $@ ==========' ++ @echo "Build options: $(DEB_BUILD_OPTIONS), Processors: $$(nproc)" ++ @echo '========== $@ ==========' ++ -cat /proc/meminfo |egrep '^(Mem|Swap)' ++ @echo '========== $@ ==========' ++ -cat /etc/hosts ++ @echo '========== $@ ==========' ++ -cat /etc/fstab ++ @echo '========== $@ ==========' ++ -cat /etc/mtab ++ @echo '========== $@ ==========' ++ -cat /proc/mounts ++ @echo '========== $@ ==========' ++ ++ mkdir -p bin ++ifeq (0,1) ++ifeq (,$(filter $(DEB_HOST_ARCH), alpha amd64 arm64 hppa mips64 mips64el ppc64 ppc64el s390x sparc64 x32)) ++ ( \ ++ echo '#! 
/bin/sh'; \ ++ echo 'if [ -x /usr/bin/linux32 ]; then'; \ ++ echo ' exec /usr/bin/linux32 /bin/uname "$$@"'; \ ++ echo 'else'; \ ++ echo ' exec /bin/uname "$$@"'; \ ++ echo 'fi'; \ ++ ) > bin/uname ++ chmod +x bin/uname ++ echo "UNAME checks" ++ uname -a ++endif ++endif ++ifneq (,$(filter $(distrel),precise)) ++ for i in autoreconf autoheader autoupdate autoscan autom4te ifnames autoconf; do \ ++ ln -sf /usr/bin/$${i}2.69 bin/$${i}; \ ++ done ++endif ++ which autoconf ++ /bin/uname -a ++ lsb_release -a ++ ++ -cat /proc/sys/kernel/random/entropy_avail ++ ++ mkdir -p stamps ++ rm -rf $(builddir) ++ mkdir -p $(builddir) ++# bash make/autoconf/autogen.sh ++ cd $(builddir) && $(EXTRA_BUILD_ENV) bash ../configure \ ++ $(DEFAULT_CONFIGURE_ARGS) \ ++ $(COMMON_CONFIGURE_ARGS) ++ ++ touch $@ ++ ++stamps/zero-configure: stamps/build stamps/unpack ++ @echo '========== $@ ==========' ++ mkdir -p stamps ++ rm -rf $(zbuilddir) ++ mkdir -p $(zbuilddir) ++ $(if $(ZERO_CC), export CC=$(ZERO_CC); export CXX=$(ZERO_CXX);) \ ++ cd $(zbuilddir) && $(EXTRA_BUILD_ENV) bash ../configure \ ++ $(ZERO_CONFIGURE_ARGS) \ ++ $(COMMON_CONFIGURE_ARGS) ++ ++ touch $@ ++ ++stamps/unpack: ++ : # apply some architecture specific patches ... ++#ifeq ($(DEB_HOST_ARCH),alpha) ++# patch -p1 < debian/patches/alpha-float-const.diff ++#endif ++ ++ $(MAKE) -f debian/rules packaging-files ++ ++ mkdir -p stamps ++ touch $@ ++ ++ifeq ($(with_check),yes) ++ ifneq (,$(filter $(DEB_HOST_ARCH), $(jtreg_archs))) ++ with_jtreg_check = $(default_vm) ++ endif ++ ++ ifneq (,$(filter zero, $(alternate_vms))) ++ ifneq (,$(filter $(DEB_HOST_ARCH), arm64 amd64 i386)) ++ # only activate after testing; hangs several tests. ++ with_jtreg_check += zero ++ endif ++ endif ++endif ++ ++build_stamps = \ ++ stamps/build \ ++ $(if $(filter $(DEB_HOST_ARCH),$(altzero_archs)),stamps/zero-build) ++ ++build_stamps += stamps/jtreg-check-default ++ ++pre-build: ++ifeq ($(with_check),yes) ++ jtreg_version="$$(dpkg-query -f '$${Version}\n' -W $(jtreg_pkg))"; \ ++ if ! 
dpkg --compare-versions $(min_jtreg_version) le $$jtreg_version; then \
++ echo "Error: testsuite requires $(jtreg_pkg) $(min_jtreg_version) but $$jtreg_version is installed"; \
++ echo "Please update the $(jtreg_pkg) dependency and regenerate debian/control"; \
++ false; \
++ fi
++endif
++ifneq (,$(filter $(DEB_HOST_ARCH),s390))
++ @echo explicitly fail the build for $(DEB_HOST_ARCH), patches not updated
++#else ifneq (,$(filter $(DEB_HOST_ARCH),armel))
++# @echo explicitly fail the build for $(DEB_HOST_ARCH), ARM assembler interpreter not yet ported
++ false
++endif
++
++build-arch: build
++build-indep: build stamps/build-docs
++build: pre-build $(build_stamps)
++ if [ -f buildwatch.pid ]; then \
++ pid=$$(cat buildwatch.pid); \
++ kill -1 $$pid || :; sleep 1; kill -9 $$pid || :; \
++ fi
++ rm -f buildwatch.pid
++
++stamps/build: stamps/configure
++ @echo '========== $@ =========='
++ifneq (,$(filter $(DEB_HOST_ARCH), alpha armel armhf ia64 m68k mips mipsel mips64 mips64el powerpc riscv64 s390x sh4 sparc sparc64))
++ sh -c 'sh debian/buildwatch.sh $(CURDIR)/$(builddir) &'
++endif
++ if $(EXTRA_BUILD_ENV) $(MAKE) --no-print-directory -C $(builddir) $(build_target); then \
++ : ; \
++ else \
++ if [ -f buildwatch.pid ]; then \
++ pid=$$(cat buildwatch.pid); \
++ kill -1 $$pid || :; sleep 1; kill -9 $$pid || :; \
++ fi; \
++ false; \
++ fi
++
++ touch $@
++
++stamps/zero-build: stamps/zero-configure
++ @echo '========== $@ =========='
++ : # FIXME: build using the just built jdk, and build the vm only
++ $(EXTRA_BUILD_ENV) $(MAKE) --no-print-directory -C $(zbuilddir) $(zbuild_target)
++ rm -rf $(builddir)/$(sdkimg)/lib/zero
++ cp -a $(zbuilddir)/$(zsdkimg)/lib/server \
++ $(builddir)/$(sdkimg)/lib/zero
++ touch $@
++
++stamps/build-docs: stamps/build
++ @echo '========== $@ =========='
++ mkdir -p $(builddir)/images/docs
++ echo ' ' > $(builddir)/images/docs/index.html
++ifeq ($(with_docs),yes)
++ $(MAKE) -C $(builddir) docs
++endif
++ touch $@
++
++stamps/jtreg-check-default: stamps/build
++ $(MAKE) -f debian/rules jtreg-run-check VMNAME=$(default_vm) VMARGS= \
++ $(if $(filter $(default_vm),hotspot),TEST_SUITES='hotspot langtools jaxp jdk',TEST_SUITES='hotspot langtools jaxp')
++# TEST_SUITES='hotspot langtools jaxp'
++ touch $@
++
++stamps/jtreg-check-zero: stamps/build
++ $(MAKE) -f debian/rules jtreg-run-check VMNAME=zero VMARGS='-vmoption:-zero' \
++ TEST_SUITES='hotspot langtools jaxp'
++ touch $@
++
++jtreg-run-check:
++ mkdir -p jtreg-test-output
++ifneq (,$(filter $(VMNAME), $(with_jtreg_check)))
++ if [ !
-f $(builddir)/$(sdkimg)/conf/security/java.security.install ]; then \ ++ fgrep -v 'sun.security.pkcs11' $(builddir)/$(sdkimg)/conf/security/java.security \ ++ > $(builddir)/$(sdkimg)/conf/security/java.security.test; \ ++ mv $(builddir)/$(sdkimg)/conf/security/java.security \ ++ $(builddir)/$(sdkimg)/conf/security/java.security.install; \ ++ mv $(builddir)/$(sdkimg)/conf/security/java.security.test \ ++ $(builddir)/$(sdkimg)/conf/security/java.security; \ ++ fi ++ ++ @echo "BEGIN jtreg" ++ $(TIME) $(MAKE) -f debian/rules -k jtregcheck \ ++ $(if $(TEST_SUITES),TEST_SUITES="$(TEST_SUITES)") \ ++ JTREG_OPTIONS='$(VMARGS)' \ ++ 2>&1 | tee jtreg-test-output/jtreg_output-$(VMNAME) ++ @echo "END jtreg" ++ ++ if [ -f $(builddir)/$(sdkimg)/conf/security/java.security.install ]; then \ ++ mv $(builddir)/$(sdkimg)/conf/security/java.security.install \ ++ $(builddir)/$(sdkimg)/conf/security/java.security; \ ++ fi ++ ++ for i in check-hotspot.log check-jaxp.log check-jdk.log check-langtools.log jtreg-summary.log; do \ ++ if [ -f jtreg-test-output/$$i ]; then \ ++ mv jtreg-test-output/$$i jtreg-test-output/$${i%*.log}-$(VMNAME).log; \ ++ else \ ++ mkdir -p jtreg-test-output; \ ++ fi; \ ++ done ++ ++ @echo "BEGIN jtreg-summary-$(VMNAME)" ++ -cat jtreg-test-output/jtreg-summary-$(VMNAME).log ++ @echo "END jtreg-summary-$(VMNAME)" ++ ++ -for i in hotspot langtools jaxp jdk; do \ ++ find jtreg-test-output/$$i/JTwork/ -name '*.jtr'; \ ++ done | sort -u > jtreg-test-output/failed_tests-$(VMNAME).list; \ ++ GZIP=-9vn tar --ignore-failed-read -C . -c -z -f jtreg-test-output/failed_tests-$(VMNAME).tar.gz -T jtreg-test-output/failed_tests-$(VMNAME).list ++ GZIP=-9vn tar -C . -c -z -f jtreg-test-output/jtreport-$(VMNAME).tar.gz $$(find jtreg-test-output -name JTreport) ++else ++ echo "jtreg harness not run for this build" > jtreg-test-output/jtreg_output-$(VMNAME) ++endif ++ ++# ---------------------------------------------------------------------------- ++# jtreg targets taken from IcedTea ++ ++TESTS_TO_RUN = $(addprefix check-,$(TEST_SUITES)) ++ ++check-hotspot: stamps/build ++ @echo '===== START $@ =====' ++ mkdir -p jtreg-test-output/hotspot ++ BOOTJDK_HOME=$(BOOTJDK_HOME) JTREG_OPTIONS=$(JTREG_OPTIONS) VMNAME=$(VMNAME) JDK_TO_TEST=$(builddir)/$(sdkimg) \ ++ AUTOPKGTEST_TMP=/tmp/ AUTOPKGTEST_ARTIFACTS=jtreg-test-output/ \ ++ $(TIME) debian/tests/hotspot | tee jtreg-test-output/$@.log ++ @echo '===== END $@ =====' ++ ++check-langtools: stamps/build ++ @echo '===== START $@ =====' ++ mkdir -p jtreg-test-output/langtools ++ BOOTJDK_HOME=$(BOOTJDK_HOME) JTREG_OPTIONS=$(JTREG_OPTIONS) VMNAME=$(VMNAME) JDK_TO_TEST=$(builddir)/$(sdkimg) \ ++ AUTOPKGTEST_TMP=/tmp/ AUTOPKGTEST_ARTIFACTS=jtreg-test-output/ \ ++ $(TIME) debian/tests/langtools | tee jtreg-test-output/$@.log ++ @echo '===== END $@ =====' ++ ++check-jaxp: stamps/build ++ @echo '===== START $@ =====' ++ mkdir -p jtreg-test-output/jaxp ++ BOOTJDK_HOME=$(BOOTJDK_HOME) JTREG_OPTIONS=$(JTREG_OPTIONS) VMNAME=$(VMNAME) JDK_TO_TEST=$(builddir)/$(sdkimg) \ ++ AUTOPKGTEST_TMP=/tmp/ AUTOPKGTEST_ARTIFACTS=jtreg-test-output/ \ ++ $(TIME) debian/tests/jaxp | tee jtreg-test-output/$@.log ++ @echo '===== END $@ =====' ++ ++check-jdk: stamps/build ++ @echo '===== START $@ =====' ++ mkdir -p jtreg-test-output/jdk ++ BOOTJDK_HOME=$(BOOTJDK_HOME) JTREG_OPTIONS=$(JTREG_OPTIONS) VMNAME=$(VMNAME) JDK_TO_TEST=$(builddir)/$(sdkimg) \ ++ AUTOPKGTEST_TMP=/tmp/ AUTOPKGTEST_ARTIFACTS=jtreg-test-output/ \ ++ $(TIME) debian/tests/jdk | tee jtreg-test-output/$@.log ++ @echo '===== END $@ 
=====' ++ ++jtregcheck: $(TESTS_TO_RUN) ++ for i in $(TEST_SUITES); do \ ++ echo "--------------- jtreg console summary for $$i ---------------"; \ ++ egrep -v '^(Passed:|Directory|Re[a-z]+\ written\ to)' jtreg-test-output/check-$$i.log; \ ++ done | tee jtreg-test-output/jtreg-summary.log ++ ++# ---------------------------------------------------------------------------- ++ ++clean: debian-clean ++ dh_testdir ++ dh_testroot ++ rm -rf stamps build build-* jtreg-test-output ++ rm -rf autom4te.cache ++ rm -f bin/{jtreg.tmwrc,my-jtreg-run,my-xvfb-run,auto*,ifnames} ++ rm -f buildwatch.pid ++ rm -f config.log configure.log ++ dh_clean ++ ++debian-clean: ++ dh_testdir ++ dh_testroot ++ dh_prep ++ for f in debian/*.in; do \ ++ f2=$$(echo $$f | sed 's/JB/$(basename)/;s/\.in$$//'); \ ++ case "$$f2" in debian/control) continue; esac; \ ++ rm -f $$f2; \ ++ done ++ rm -f debian/*.install debian/*.links debian/*.debhelper.log ++ ++lib_ext_dirs = common $(sort $(foreach arch,$(arch_map),$(firstword $(subst =,$(SPACE),$(arch))))) ++ifeq ($(derivative),Ubuntu) ++ lib_ext_dirs := $(filter-out arm hppa m68k mips% s390% sh%, $(lib_ext_dirs)) ++else ++ lib_ext_dirs := $(filter-out arm hppa, $(lib_ext_dirs)) ++endif ++ ++install: packaging-files ++ dh_testdir ++ dh_testroot ++ dh_prep || dh_clean -k ++ rm -f debian/*.install debian/*.links ++ dh_installdirs ++ ++ @echo "all_jre_tools: $(all_jre_tools)" ++ @echo "jre_tools: $(jre_tools)" ++ @echo "jre_hl_tools: $(jre_hl_tools)" ++ @echo "all_jdk_tools: $(all_jdk_tools)" ++ @echo "jdk_tools: $(jdk_tools)" ++ ++ : # install into temporary location ++ mkdir -p $(d)/$(basedir) ++ cp -a $(builddir)/$(sdkimg)/* $(d)/$(basedir)/ ++ ++ chmod -R u+w $(d) ++ ++ : # use javaws from icedtea-netx ++ find $(d) -name 'javaws*' | xargs -r rm -f ++ ++ : # install default jvm config file ++ ls -l $(d)/$(basedir)/lib ++ cp debian/jvm.cfg-default $(d)/$(basedir)/lib/. ++ ++ : # compress manpages ++ find $(d)/$(basedir)/man -type f ! -type l | xargs gzip -9v -n ++ ++ : # don't install a second copy ++ rm -vf $(d)/$(basedir)/conf/security/java.security.install ++ ++ifeq ($(with_debugedit),yes) ++ : # rewrite build-id ++ for i in $$(find $(d)/$(basedir) -type f | xargs -r file | sed -n '/ELF/s/:.*//p'); do \ ++ debugedit --build-id --build-id-seed='$(build_id_seed)' $$i; \ ++ done ++endif ++ ++ mkdir -p $(d)/$(etcdir)/security ++ mkdir -p $(d)/$(etcdir)/security/policy/limited ++ mkdir -p $(d)/$(etcdir)/security/policy/unlimited ++ mkdir -p $(d)/$(etcdir)/management ++ mkdir -p $(d)/$(etcdir)/jfr ++ ++ifeq (0,1) ++ : # rename templates (comments only) to config files, ++ : # and move to /$(etcdir) ++ for i in \ ++ conf/management/jmxremote.password \ ++ conf/management/snmp.acl \ ++ conf/sdp/sdp.conf; \ ++ do \ ++ mv $(d)/$(basedir)/$$i.template $(d)/$(etcdir)/$$i; \ ++ done ++else ++ : # FIXME: what to do about these? 
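++ : # (for now the template files are simply removed; the disabled branch
++ : # above would instead rename them into /$(etcdir))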
++ for i in \ ++ conf/management/jmxremote.password \ ++ conf/management/snmp.acl \ ++ conf/sdp/sdp.conf; \ ++ do \ ++ rm -f $(d)/$(basedir)/$$i.template; \ ++ done ++endif ++ ++ifeq ($(with_bridge),atk) ++# only add releases that are known to work with atk ++# by default leave atk disabled ++# ifneq (,$(filter $(distrel),)) ++# cp -p debian/accessibility-atk.properties.enabled \ ++ $(d)/$(basedir)/conf/accessibility.properties ++# else ++ cp -p debian/accessibility-atk.properties.disabled \ ++ $(d)/$(basedir)/conf/accessibility.properties ++# endif ++else ++ cp -p debian/accessibility.properties $(d)/$(basedir)/conf/ ++endif ++ cp -p debian/swing.properties $(d)/$(basedir)/conf/ ++ ++ifneq (,$(pkg_certs)) ++ rm -f $(d)/$(basedir)/lib/security/cacerts ++endif ++ ++ : # FIXME: merge nss changes from IcedTea ++ cp debian/nss.cfg $(d)/$(basedir)/conf/security/nss.cfg ++ ++ : # move config files to $(etcdir) and symlink them. ++ rm -f $(d_jrehl).links ++ for i in \ ++ conf/accessibility.properties \ ++ conf/logging.properties \ ++ conf/sound.properties \ ++ conf/net.properties \ ++ lib/psfontj2d.properties \ ++ lib/psfont.properties.ja \ ++ lib/jfr/default.jfc \ ++ lib/jfr/profile.jfc \ ++ conf/swing.properties \ ++ conf/management/jmxremote.access \ ++ conf/management/management.properties \ ++ conf/security/java.policy \ ++ conf/security/java.security \ ++ conf/security/policy/limited/default_local.policy \ ++ conf/security/policy/limited/exempt_local.policy \ ++ conf/security/policy/limited/default_US_export.policy \ ++ conf/security/policy/unlimited/default_local.policy \ ++ conf/security/policy/unlimited/default_US_export.policy \ ++ conf/security/policy/README.txt \ ++ $$([ -f $(d)/$(basedir)/conf/security/nss.cfg ] && echo conf/security/nss.cfg) \ ++ $(if $(findstring ca-cert,$(pkg_certs)),,lib/security/cacerts) \ ++ lib/security/default.policy \ ++ lib/security/blocked.certs \ ++ lib/security/public_suffix_list.dat; \ ++ do \ ++ dest=$$(echo $$i | sed 's,^[^/]*/,,'); \ ++ mv $(d)/$(basedir)/$$i $(d)/$(etcdir)/$$dest; \ ++ echo "$(etcdir)/$$dest $(basedir)/$$i" >> $(d_jrehl).links; \ ++ done ++ mv $(d)/$(basedir)/lib/jvm.cfg \ ++ $(d)/$(etcdir)/ ++ifneq (,$(filter $(DEB_HOST_ARCH), $(hotspot_archs))) ++ printf -- '-zero KNOWN\n' >> $(d)/$(etcdir)/jvm.cfg ++else ++ printf -- '-zero ALIASED_TO -server\n' >> $(d)/$(etcdir)/jvm.cfg ++endif ++ifneq (,$(filter $(DEB_HOST_ARCH), amd64 i386)) ++ printf -- '-dcevm KNOWN\n' >> $(d)/$(etcdir)/jvm.cfg ++endif ++ ++ : # remove files which we do not want to distribute ++ ++ : # remove empty directories ++ ++ : # FIXME: fix, move and install tapset files ++ rm -rf $(d)/$(basedir)/tapset ++ ++ : # TODO: why do we provide a custom font.properties.ja? ++ : # cp -p debian/font.properties.ja $(d_jbin)/$(etcdir)/. ++# cp debian/font.properties.wqy_zenhai \ ++# $(d)/$(etcdir)/font.properties.ja ++ ++ ++ : # now move things to the packages. it is so ****** to create ++ : # .install files first. 
dh_movefiles did do the job perfectly ++ ++ : # $(p_jrehl).install / $(p_jre).install ++ ( \ ++ echo 'etc'; \ ++ echo '$(basedir)/lib/server'; \ ++ echo '$(basedir)/lib/jvm.cfg-default'; \ ++ echo '$(basedir)/lib/libjli.so'; \ ++ echo '$(basedir)/lib/ct.sym'; \ ++ echo '$(basedir)/lib/jexec'; \ ++ echo '$(basedir)/lib/jspawnhelper'; \ ++ echo '$(basedir)/lib/jrt-fs.jar'; \ ++ echo '$(basedir)/release'; \ ++ echo '$(basedir)/lib/tzdb.dat'; \ ++ if [ "$(default_vm)" = hotspot ]; then \ ++ echo '$(basedir)/lib/classlist'; \ ++ fi; \ ++ echo '$(basedir)/lib/modules'; \ ++ ) > debian/$(p_jrehl).install ++ifneq (,$(filter $(DEB_HOST_ARCH),i386)) ++ ifneq (,$(findstring --with-jvm-variants=client,$(DEFAULT_CONFIGURE_ARGS))) ++ echo '$(basedir)/lib/client' >> debian/$(p_jrehl).install ++ endif ++endif ++ifeq ($(derivative),Ubuntu) ++ ( \ ++ echo 'usr/share/apport/package-hooks'; \ ++ ) >> debian/$(p_jrehl).install ++endif ++ ++ $(RM) debian/$(p_jre).install ++ ++ : # jre binaries and man pages ++ for i in $(jre_hl_tools); do \ ++ case "$$i" in \ ++ *policytool*) p=debian/$(p_jre).install;; \ ++ *) p=debian/$(p_jrehl).install; \ ++ esac; \ ++ echo $(basedir)/bin/$$i >> $$p; \ ++ if [ -f $(d)/$(basedir)/man/man1/$$i.1.gz ]; then \ ++ echo $(basedir)/man/man1/$$i.1.gz >> $$p; \ ++ fi; \ ++ if [ -f $(d)/$(basedir)/man/ja_JP.UTF-8/man1/$$i.1.gz ]; then \ ++ echo $(basedir)/man/ja_JP.UTF-8/man1/$$i.1.gz >> $$p; \ ++ fi; \ ++ done ++ ++ cd $(CURDIR)/$(d); \ ++ for i in $(basedir)/lib/*.so; do \ ++ case "$$i" in \ ++ */libawt_xawt.so|*/libjawt.so|*/libsplashscreen.so|*/xawt) echo $$i >> ../$(p_jre).install;; \ ++ */zero) echo $$i >> ../$(p_jrez).install;; \ ++ *) echo $$i >> ../$(p_jrehl).install; \ ++ esac; \ ++ done ++ ++ @echo XXXXX $(alternate_vms) ++ifneq (,$(filter zero, $(alternate_vms))) ++ echo '$(basedir)/lib/zero' >> debian/$(p_jrez).install ++endif ++ ++ : # $(p_jrehl).install ++ ( \ ++ echo '$(basedir)/legal'; \ ++ if [ -d $(d)/$(basedir)/man/ja ]; then \ ++ echo '$(basedir)/man/ja'; \ ++ fi; \ ++ ) >> debian/$(p_jrehl).install ++ ++ : # $(p_jdkhl).install ++ ( \ ++ for i in $(jdk_hl_tools); do \ ++ echo $(basedir)/bin/$$i; \ ++ if [ -f $(d)/$(basedir)/man/man1/$$i.1.gz ]; then \ ++ echo $(basedir)/man/man1/$$i.1.gz; \ ++ fi; \ ++ if [ -f $(d)/$(basedir)/man/ja_JP.UTF-8/man1/$$i.1.gz ]; then \ ++ echo $(basedir)/man/ja_JP.UTF-8/man1/$$i.1.gz; \ ++ fi; \ ++ done; \ ++ cd $(d); \ ++ for i in $(basedir)/include/*.h; do \ ++ case $$i in */jawt.h|*/jawt_md.h) continue; esac; \ ++ [ -h $$i ] && continue; \ ++ echo $$i; \ ++ done; \ ++ echo '$(basedir)/include/*/jni_md.h'; \ ++ echo '$(basedir)/jmods'; \ ++ ) > debian/$(p_jdkhl).install ++ ++ : # $(p_jdk).install ++ ( \ ++ echo '$(basedir)/include/jawt.h'; \ ++ echo '$(basedir)/include/*/jawt_md.h'; \ ++ for i in $(jdk_tools); do \ ++ echo $(basedir)/bin/$$i; \ ++ if [ -f $(d)/$(basedir)/man/man1/$$i.1.gz ]; then \ ++ echo $(basedir)/man/man1/$$i.1.gz; \ ++ fi; \ ++ if [ -f $(d)/$(basedir)/man/ja_JP.UTF-8/man1/$$i.1.gz ]; then \ ++ echo $(basedir)/man/ja_JP.UTF-8/man1/$$i.1.gz; \ ++ fi; \ ++ done; \ ++ ) > debian/$(p_jdk).install ++ ++ : # $(p_src).install ++ ( \ ++ echo '$(basedir)/lib/src.zip $(TOP)/$(basename)/lib'; \ ++ ) > debian/$(p_src).install ++ ++ : # copy apport hook ++ifeq ($(derivative),Ubuntu) ++ mkdir -p $(d)/usr/share/apport/package-hooks/ ++ cp debian/apport-hook.py $(d)/usr/share/apport/package-hooks/source_$(basename).py ++endif ++ ++ : # move demos and samples, create symlinks for $(p_demo) ++ mkdir -p 
$(d)/usr/share/doc/$(p_jrehl)/demo
++ for i in $(d)/$(basedir)/demo/*; do \
++ b=$$(basename $$i); \
++ case "$$i" in \
++ */jvmti) \
++ echo $(basedir)/demo/$$b usr/share/doc/$(p_jrehl)/demo/$$b >> $(d_demo).links;; \
++ *) \
++ mv $$i $(d)/usr/share/doc/$(p_jrehl)/demo/$$b; \
++ echo usr/share/doc/$(p_jrehl)/demo/$$b $(basedir)/demo/$$b >> $(d_demo).links;; \
++ esac; \
++ done
++ echo usr/share/doc/$(p_jre) usr/share/doc/$(p_demo) >> $(d_demo).links
++
++ : # $(p_demo).install
++ ( \
++ echo '$(basedir)/demo'; \
++ echo 'usr/share/doc/$(p_jrehl)/demo'; \
++ ) > debian/$(p_demo).install
++
++ : # $(p_dbg).install
++ifneq (,$(wildcard $(builddir)/$(sdkimg)/lib/*.diz))
++ ( \
++ echo '$(basedir)/lib/*.diz'; \
++ echo '$(basedir)/lib/*/*.diz'; \
++ ) > debian/$(p_dbg).install
++endif
++
++# the logic here -> move the external debuginfo files into usr/lib/debug:
++# check the binaries, and if a debuginfo file is found, check whether the
++# binary has a build id; debug information with a build id is moved under
++# usr/lib/debug/.build-id. Since we rewrite the build id when
++# with_debugedit=yes, also patch the build id inside the debug symbols file.
++# Otherwise (no build id) just leave the debuginfo file where it is.
++ mkdir -p $(d)/usr/lib/debug
++ (cd $(d) && find . -path './$(basedir)/*' -name '*.debuginfo' | cpio -pdm usr/lib/debug/)
++ find $(d)/$(basedir)/ -name '*.debuginfo' -exec rm {} \;
++ echo > debian/$(p_dbg).install
++ for binary in `(cd $(d) && find . -path './$(basedir)/*' -print)`; do \
++ dir=`dirname $$binary`; \
++ base=`basename $$binary .so`; \
++ debuginfo=usr/lib/debug/$$dir/$$base.debuginfo; \
++ [ -f $(d)/$$debuginfo ] || continue; \
++ $(OBJCOPY) --compress-debug-sections $(d)/$$debuginfo; \
++ b_id=$$(LC_ALL=C readelf -n $(d)/$$binary | sed -n 's/ *Build ID: *\([0-9a-f][0-9a-f]*\)/\1/p'); \
++ if [ -z "$$b_id" ]; then \
++ echo $$debuginfo >> debian/$(p_dbg).install; \
++ else \
++ debugdir=usr/lib/debug/.build-id/$${b_id:0:2}; \
++ f=$${b_id:2}.debug; \
++ mkdir -p $(d)/$$debugdir; \
++ mv $(d)/$$debuginfo $(d)/$$debugdir/$$f; \
++ chmod 644 $(d)/$$debugdir/$$f; \
++ echo $$debugdir/$$f >> debian/$(p_dbg).install; \
++ notes=$$(mktemp); \
++ $(OBJCOPY) --dump-section=.note.gnu.build-id=$$notes $(d)/$$binary; \
++ $(OBJCOPY) --update-section=.note.gnu.build-id=$$notes $(d)/$$debugdir/$$f; \
++ rm $$notes; \
++ fi; \
++ if [[ "$(DEB_BUILD_OPTIONS)" != *nostrip* ]]; then \
++ $(OBJCOPY) --remove-section=.comment --remove-section=.note $(d)/$$binary; \
++ if [[ "$$binary" != *.so ]]; then \
++ $(STRIP) --strip-unneeded $(d)/$$binary; \
++ fi; \
++ fi; \
++ done;
++ find $(d)/usr/lib/debug/ -type d -empty -delete
++
++# : # FIXME: the targets are not installed ...
++# rm -rfv $(d)/$(basedir)/lib/jfr ++ ++ dh_install --sourcedir=debian/tmp -XLICENSE ++ifeq (,$(findstring old, $(old_debhelper))) ++ dh_missing --sourcedir=debian/tmp --fail-missing -XLICENSE ++endif ++ ++ find $(d_jrehl) $(d_jre) $(d_jrez) \ ++ \( -name '*.debuginfo' -o -name '*.diz' \) -print | xargs -r rm -f ++ ++ifeq (0,1) ++# FIXME: ext no longer supported ++ : # give all permissions to cross-VM extension directory ++ sed -i \ ++ -e '/permission java\.security\.AllPermission;/,/};/c\' \ ++ -e ' permission java.security.AllPermission;\' \ ++ -e '};\' \ ++ -e '\' \ ++ $(if $(DEB_HOST_MULTIARCH),$(foreach i, $(lib_ext_dirs), \ ++ -e 'grant codeBase "file:/$(TOP)/java-$(shortver)-$(origin)-$(i)/lib/ext/*" {\' \ ++ -e ' permission java.security.AllPermission;\' \ ++ -e '};\')) \ ++ -e '\' \ ++ -e '// Comment this out if you want to give all permissions to the\' \ ++ -e '// Debian Java repository too:\' \ ++ -e '//grant codeBase "file:/usr/share/java/repository/-" {\' \ ++ -e '// permission java.security.AllPermission;\' \ ++ -e '//};\' \ ++ -e '' \ ++ $(d_jrehl)/$(security)/java.policy ++# 'make Emacs Makefile mode happy ++endif ++ ++ dh_installdirs -p$(p_jrehl) \ ++ usr/share/doc/$(p_jrehl) \ ++ usr/share/binfmts ++ ++ dh_installdirs -p$(p_jre) \ ++ usr/share/applications \ ++ usr/share/application-registry \ ++ usr/share/mime-info \ ++ usr/share/pixmaps ++ ++ : # add GNOME stuff ++ifneq (,$(findstring cautious-launcher, $(java_launcher))) ++ cp -p debian/$(basename)-java.desktop \ ++ $(d_jre)/usr/share/applications/ ++endif ++ ++ for i in archive; do \ ++ cp debian/$(basename)-$$i.applications \ ++ $(d_jre)/usr/share/application-registry/; \ ++ cp debian/$(basename)-$$i.keys $(d_jre)/usr/share/mime-info/; \ ++ cp debian/$(basename)-$$i.mime $(d_jre)/usr/share/mime-info/; \ ++ done ++ ++ cp -p debian/sun_java.xpm \ ++ $(d_jre)/usr/share/pixmaps/$(basename).xpm ++ ++ mkdir -p $(d_demo)/usr/share/pixmaps ++ cp -p debian/sun_java_app.xpm \ ++ $(d_demo)/usr/share/pixmaps/$(basename)-app.xpm ++ ++ : # install icons ++ for i in 16 24 32 48; do \ ++ install -D -m 644 -p src/java.desktop/unix/classes/sun/awt/X11/java-icon$${i}.png \ ++ $(d_jre)/usr/share/icons/hicolor/$${i}x$${i}/apps/$(basename).png; \ ++ done ++ ++ : # create docdir symlinks for $(p_jrehl) ++ ( \ ++ echo usr/share/doc/$(p_jrehl) $(basedir)/docs; \ ++ ) >> $(d_jrehl).links ++ ++ : # create docdir symlinks for $(p_jre) ++ ( \ ++ echo usr/share/doc/$(p_jrehl) usr/share/doc/$(p_jre); \ ++ ) > $(d_jre).links ++ ++ifneq (,$(filter zero, $(alternate_vms))) ++ : # create docdir symlinks for $(p_jrez) ++ ( \ ++ echo usr/share/doc/$(p_jrehl) usr/share/doc/$(p_jrez); \ ++ ) > $(d_jrez).links ++endif ++ ++ : # create docdir symlinks for $(p_src) ++ ( \ ++ echo usr/share/doc/$(p_jdk) usr/share/doc/$(p_src); \ ++ ) > $(d_src).links ++ ++ : # create docdir symlinks for $(p_jdkhl) ++ ( \ ++ echo usr/share/doc/$(p_jrehl) usr/share/doc/$(p_jdkhl); \ ++ ) > $(d_jdkhl).links ++ ++ : # create docdir symlinks for $(p_jdk) ++ ( \ ++ echo usr/share/doc/$(p_jre) usr/share/doc/$(p_jdk); \ ++ echo $(TOP)/$(basename)/lib/src.zip $(TOP)/$(basename)/src.zip; \ ++ ) > $(d_jdk).links ++# doesn't work, no package dependency ++ifneq (,$(DEB_HOST_MULTIARCH)) ++ echo '$(TOP)/$(basename)/src.zip $(basedir)/lib/src.zip' >> $(d_jdkhl).links ++ echo '$(TOP)/$(basename)/lib/src.zip $(basedir)/lib/src.zip' >> $(d_jdkhl).links ++endif ++ ++ : # create docdir symlinks for $(p_dbg) ++ ( \ ++ echo usr/share/doc/$(p_jrehl) usr/share/doc/$(p_dbg); \ ++ ) > $(d_dbg).links 
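++ : # (note: shipping usr/share/doc as a symlink assumes each of these
++ : # packages depends on the package owning the real doc directory, as
++ : # Debian policy requires for symlinked doc dirs)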
++ ++ : # OpenJDK Stack Unwinder and Frame Decorator ++ifneq (,$(filter $(DEB_HOST_ARCH),amd64)) ++ mkdir -p $(d_jrehl)/usr/share/gdb/auto-load/$(basedir)/jre/lib/server ++ cp debian/dbg.py \ ++ $(d_jrehl)/usr/share/gdb/auto-load/$(basedir)/jre/lib/server/libjvm.so-gdb.py ++ ifneq (,$(filter zero, $(alternate_vms))) ++ mkdir -p $(d_jrez)/usr/share/gdb/auto-load/$(basedir)/jre/lib/zero ++ ln -sf ../server/libjvm.so-gdb.py \ ++ $(d_jrez)/usr/share/gdb/auto-load/$(basedir)/jre/lib/zero/libjvm.so ++ endif ++endif ++ ++ifeq ($(with_bridge),atk) ++ : # create links for the atk wrapper ++ ifeq (0,1) ++ # FIXME: ext no longer supported ++ echo "usr/share/java/java-atk-wrapper.jar $(basedir)/lib/ext/java-atk-wrapper.jar" \ ++ >> $(d_jre).links ++ endif ++ echo "usr/lib$(multiarch_dir)/jni/libatk-wrapper.so $(basedir)/lib/libatk-wrapper.so" \ ++ >> $(d_jre).links ++else ifeq ($(with_bridge),yes) ++ ifeq (0,1) ++# FIXME: ext no longer supported ++ : # create links for the gnome accessibility bridge ++ echo "usr/share/java/gnome-java-bridge.jar $(basedir)/lib/ext/gnome-java-bridge.jar" \ ++ >> $(d_jre).links ++ ifeq ($(with_jni_bridge),yes) ++ echo "usr/lib$(multiarch_dir)/jni/libjava-access-bridge-jni.so $(basedir)/lib/libjava-access-bridge-jni.so" \ ++ >> $(d_jre).links ++ endif ++ endif ++endif ++ ++ : # create links for the config/lib files ++# note: $(d_jrehl).links already created when moving files to the etcdir ++ifneq (,$(DEB_HOST_MULTIARCH)) ++ mv $(d_jrehl)/$(etcdir)/jvm.cfg \ ++ $(d_jrehl)/$(etcdir)/jvm-$(DEB_HOST_ARCH).cfg ++ ( \ ++ echo "$(etcdir)/jvm-$(DEB_HOST_ARCH).cfg $(basedir)/lib/jvm.cfg"; \ ++ ) >> $(d_jrehl).links ++else ++ ( \ ++ echo "$(etcdir)/jvm.cfg $(basedir)/lib/jvm.cfg"; \ ++ ) >> $(d_jrehl).links ++endif ++ifneq (,$(pkg_certs)) ++ echo "etc/ssl/certs/java/cacerts $(basedir)/lib/security/cacerts" \ ++ >> $(d_jrehl).links ++endif ++ ++ @echo JRE_HL_TOOLS: $(jre_hl_tools) jexec ++ @echo JRE_TOOLS: $(jre_tools) ++ @echo JDK_HL_TOOLS: $(jdk_hl_tools) ++ @echo JDK_TOOLS: $(jdk_tools) ++ ( \ ++ echo 'name=$(jdirname)'; \ ++ echo 'alias=$(jdiralias)'; \ ++ echo 'priority=$(priority)'; \ ++ echo 'section=main'; \ ++ echo ''; \ ++ for i in $(jre_hl_tools); do \ ++ echo "hl $$i /$(basedir)/bin/$$i"; \ ++ done; \ ++ echo "hl jexec /$(basedir)/lib/jexec"; \ ++ for i in $(jre_tools); do \ ++ echo "jre $$i /$(basedir)/bin/$$i"; \ ++ done; \ ++ for i in $(jdk_hl_tools); do \ ++ echo "jdkhl $$i /$(basedir)/bin/$$i"; \ ++ done; \ ++ for i in $(jdk_tools); do \ ++ echo "jdk $$i /$(basedir)/bin/$$i"; \ ++ done; \ ++ ) > $(d_jrehl)/$(TOP)/.$(jdiralias).jinfo ++ ++ ( \ ++ echo 'package $(basename)'; \ ++ echo 'interpreter /usr/bin/jexec'; \ ++ echo 'magic PK\x03\x04'; \ ++ ) > $(d_jrehl)/$(basedir)/lib/jar.binfmt ++ ++ : # another jvm symlink ++ ln -sf $(jdirname) $(d_jrehl)/usr/lib/jvm/$(jdiralias) ++ : # the debug link only needed for debhelper << 9 ++ mkdir -p $(d_jrehl)/usr/lib/debug/usr/lib/jvm/$(jdirname) ++ ln -sf $(jdirname) $(d_jrehl)/usr/lib/debug/usr/lib/jvm/$(jdiralias) ++ ++ifeq ($(with_systemtap),yes FIXME) ++ : # systemtap support ++ mkdir -p $(d_jrehl)/usr/share/systemtap/tapset ++ cp -p build/tapset/hotspot.stp $(d_jrehl)/usr/share/systemtap/tapset/ ++endif ++ ++ : # install lintian overrides ++ for FILE in debian/*.overrides; do \ ++ PKG=`basename $$FILE .overrides`; \ ++ install -D -m644 $$FILE debian/$$PKG/usr/share/lintian/overrides/$$PKG; \ ++ done ++ ++nodocs = $(if $(findstring nodoc, $(DEB_BUILD_OPTIONS)),-N$(p_doc)) ++nojrez = $(if $(filter zero, 
$(alternate_vms)),,-N$(p_jrez))
++
++# Build architecture independent packages
++binary-indep: build-indep install
++ dh_testdir
++ dh_testroot
++ifeq ($(with_docs),yes)
++ dh_installchangelogs -p$(p_doc)
++ dh_installdocs -p$(p_doc)
++ mkdir -p $(d_doc)/usr/share/doc/$(p_jrehl)
++ for i in $(builddir)/images/docs/*; do \
++ [ -e $$i ] || continue; \
++ b=$$(basename $$i); \
++ cp -a $$i $(d_doc)/usr/share/doc/$(p_jrehl)/; \
++ ln -sf ../$(p_jrehl)/$$b $(d_doc)/usr/share/doc/$(p_doc)/$$b; \
++ done
++ rm -v $(d_doc)/usr/share/doc/$(p_jrehl)/api/script-dir/{jquery-3.6.1.min.js,jquery-ui.min.css,jquery-ui.min.js}
++ dh_link -p$(p_doc) \
++ /usr/share/javascript/jquery-ui/jquery-ui.min.js \
++ /usr/share/doc/$(p_jrehl)/api/script-dir/jquery-3.6.1.min.js \
++ /usr/share/javascript/jquery-ui/themes/base/jquery-ui.min.css \
++ /usr/share/doc/$(p_jrehl)/api/script-dir/jquery-ui.min.css \
++ /usr/share/javascript/jquery-ui/jquery-ui.min.js \
++ /usr/share/doc/$(p_jrehl)/api/script-dir/jquery-ui.min.js
++
++endif
++# FIXME: desktop and menu files not ready for multiarch. #658321
++ifeq (,$(DEB_HOST_MULTIARCH))
++ dh_installmenu -i $(nodocs)
++endif
++ -dh_icons -i $(nodocs) || dh_iconcache -i $(nodocs)
++# dh_installdebconf -i $(nodocs)
++ dh_link -i $(nodocs)
++ set -e; if which dh_strip_nondeterminism >/dev/null; then \
++ dh_strip_nondeterminism -i $(nodocs); \
++ fi
++ dh_compress -i $(nodocs) -Xexamples -Xdemo -Xpackage-list -Xelement-list
++ dh_fixperms -i $(nodocs)
++ dh_installdeb -i $(nodocs)
++ dh_gencontrol -i $(nodocs) -- $(control_vars)
++ dh_md5sums -i $(nodocs)
++ dh_builddeb -i $(nodocs) $(bd_options)
++
++absarchdir = $(CURDIR)/$(d_jrehl)/$(basedir)/lib
++shlibdeps_ld_path = $(absarchdir):$(absarchdir)/client:$(absarchdir)/server:$(absarchdir)/native_threads
++
++# pass vm name as first argument
++define install_test_results
++ mkdir -p $(d_jdk)/usr/share/doc/$(p_jrehl)/test-$(DEB_HOST_ARCH)
++ -cp jtreg-test-output/jtreg_output-$(1) \
++ $(d_jdk)/usr/share/doc/$(p_jrehl)/test-$(DEB_HOST_ARCH)/jtreg_output-$(1).log
++ -cp jtreg-test-output/jtreg-summary-$(1).log jtreg-test-output/check-*-$(1).log \
++ $(d_jdk)/usr/share/doc/$(p_jrehl)/test-$(DEB_HOST_ARCH)/
++ -cp jtreg-test-output/failed_tests-$(1).tar.gz \
++ $(d_jdk)/usr/share/doc/$(p_jrehl)/test-$(DEB_HOST_ARCH)/failed_tests-$(1).tar.gz
++ -cp jtreg-test-output/jtreport-$(1).tar.gz \
++ $(d_jdk)/usr/share/doc/$(p_jrehl)/test-$(DEB_HOST_ARCH)/jtreport-$(1).tar.gz
++endef
++
++# Build architecture dependent packages
++binary-arch: build install
++ dh_testdir
++ dh_testroot
++
++ dh_installchangelogs -p$(p_jrehl)
++ dh_installdocs -p$(p_jrehl) \
++ debian/JAVA_HOME \
++ debian/README.alternatives \
++ debian/README.Debian
++ $(call install_test_results,$(default_vm))
++
++ifneq (,$(filter zero, $(alternate_vms)))
++ $(call install_test_results,zero)
++endif
++# FIXME: desktop and menu files not ready for multiarch.
#658321 ++ifeq (,$(DEB_HOST_MULTIARCH)) ++ dh_installmenu -a $(nodemo) $(nojrez) ++endif ++ -dh_icons -a $(nodemo) $(nojrez) \ ++ || dh_iconcache -a $(nodemo) $(nojrez) ++# dh_installdebconf -a $(nodemo) $(nojrez) ++ dh_link -a $(nodemo) $(nojrez) ++ set -e; if which dh_strip_nondeterminism >/dev/null; then \ ++ dh_strip_nondeterminism -a $(nodemo) $(nojrez); \ ++ fi ++# The strip operation is performed manually, because the build packages ++# external debug symbols (so that native libraries are included in the modules) ++# This step would override the actual debug symbols with the empty sections ++# dh_strip -v -a $(nodemo) $(nojrez) \ ++# -Xlibjvm.so -X.debuginfo --dbg-package=$(p_dbg) ++ ++ dh_compress -a $(nodemo) $(nojrez) -Xexamples -Xdemo -Xpackage-list ++ dh_fixperms -a $(nodemo) $(nojrez) ++ dh_makeshlibs -p$(p_jrehl) -p$(p_jre) ++ dh_shlibdeps -a $(nodemo) $(nojrez) -L $(p_jrehl) \ ++ -l$(shlibdeps_ld_path) \ ++ -- ++ find $(d_jrehl) $(d_jre) $(d_jrez) -type f -name '*.debuginfo' ++ dh_installdeb -a $(nodemo) $(nojrez) ++ dh_gencontrol -a $(nodemo) $(nojrez) \ ++ -- $(control_vars) ++ dh_md5sums -a $(nodemo) $(nojrez) ++ : # FIXME: something seems to hang here ... ++ -ps afx ++ NO_PKG_MANGLE=1 \ ++ dh_builddeb -a $(nodemo) $(nojrez) #$(bd_options) ++ ++is_release = yes ++git_project = jdk17u ++git_tag = jdk-17.0.8+7 ++package_version = $(subst jdk-,,$(git_tag)) ++package_version = $(shell echo $(PKGVERSION) | sed 's/-[^-][^-]*$$//') ++ifneq ($(is_release),yes) ++ package_version := $(subst +,~,$(package_version)) ++endif ++git_url = https://github.com/openjdk/$(git_project) ++origdir = ../$(basename)-$(package_version).orig ++topdir = $(basename)-$(package_version) ++get-orig: ++ mkdir $(origdir) ++ wget -O $(origdir)/jdk.tar.gz $(git_url)/archive/$(git_tag).tar.gz ++ ++ set -ex; \ ++ repo=jdk; \ ++ d=$$(tar tf $(origdir)/$$repo.tar.gz | head -1 | sed 's,/.*,,'); \ ++ tar xf $(origdir)/$$repo.tar.gz; \ ++ case $$repo in jdk) \ ++ rm -v -rf $$d/src/java.base/share/native/libzip/zlib; \ ++ find $$d/src/java.desktop/share/native/libjavajpeg \ ++ ! -name imageioJPEG.c ! -name jpegdecoder.c ! 
-name jinclude.h \
++ -type f -print -delete; \
++ rm -v -rf $$d/src/java.desktop/share/native/libsplashscreen/giflib; \
++ rm -v -rf $$d/src/java.desktop/share/native/libsplashscreen/libpng; \
++ rm -v -rf $$d/src/java.smartcardio/unix/native/libj2pcsc/MUSCLE; \
++ esac; \
++ mv $$d $(topdir); \
++ chmod +x $(topdir)/configure; \
++ rm -f $(topdir)/.hgtags; \
++ rm -f $(topdir)/.gitattributes; \
++ rm -rf $(topdir)/.github; \
++ XZ_OPT=-9v tar cfJ ../$(basename)_$(package_version).orig.tar.xz $(topdir); \
++ rm -rf $(topdir)
++
++# keep these in the tarball, older lcms are not supported
++# rm -v -f $$d/src/java.desktop/share/native/liblcms/cms*.c; \
++# rm -v -f $$d/src/java.desktop/share/native/liblcms/lcms2*.h; \
++
++ rm -rf $(origdir)
++
++binary: binary-arch binary-indep
++.PHONY: build build-arch build-indep clean binary-indep binary-arch binary install packaging-files
diff --cc debian/series
index 000000000,000000000..024cad794
new file mode 100644
--- /dev/null
+++ b/debian/series
@@@ -1,0 -1,0 +1,26 @@@
++autoconf-select.diff
++system-pcsclite.diff
++hotspot-mips-align.diff
++icc_loading_with_symlink.diff
++icedtea-override-redirect-compiz.patch
++jdk-freetypeScaler-crash.diff
++libpcsclite-dlopen.diff
++jexec.diff
++default-jvm-cfg.diff
++workaround_expand_exec_shield_cs_limit.diff
++adlc-parser.patch
++multiple-pkcs11-library-init.patch
++applet-hole.patch
++m68k-support.diff
++s390x-thread-stack-size.diff
++jdk-getAccessibleValue.diff
++zero-x32.diff
++jtreg-location.diff
++jdk-i18n-pt_BR.diff
++mips-sigset.diff
++disable-doclint-by-default.diff
++hotspot-disable-exec-shield-workaround.diff
++atk-wrapper-security.patch
++dnd-files.patch
++hotspot-powerpcspe.diff
++hotspot-libpath.diff
diff --cc debian/sharedmimeinfo
index 000000000,000000000..78604fdbe
new file mode 100644
--- /dev/null
+++ b/debian/sharedmimeinfo
@@@ -1,0 -1,0 +1,12 @@@
++<?xml version="1.0" encoding="UTF-8"?>
++<mime-info xmlns="http://www.freedesktop.org/standards/shared-mime-info">
++ <mime-type type="application/x-java-archive">
++  <comment>Java Archive</comment>
++  <glob pattern="*.jar"/>
++ </mime-type>
++
++ <mime-type type="application/x-java-jnlp-file">
++  <comment>Java Web Start application</comment>
++  <glob pattern="*.jnlp"/>
++ </mime-type>
++</mime-info>
diff --cc debian/source/format
index 000000000,000000000..163aaf8d8
new file mode 100644
--- /dev/null
+++ b/debian/source/format
@@@ -1,0 -1,0 +1,1 @@@
++3.0 (quilt)
diff --cc debian/source/lintian-overrides
index 000000000,000000000..194db8988
new file mode 100644
--- /dev/null
+++ b/debian/source/lintian-overrides
@@@ -1,0 -1,0 +1,2 @@@
++# parts of the test suite, not installed
++openjdk-17 source: source-is-missing
diff --cc debian/sun_java.xpm
index 000000000,000000000..00db1b68c
new file mode 100644
--- /dev/null
+++ b/debian/sun_java.xpm
@@@ -1,0 -1,0 +1,194 @@@
++/* XPM */
++static char *sun_java[] = {
++/* columns rows colors chars-per-pixel */
++"32 32 156 2",
++" c #34479E",
++".
c #4152A3", ++"X c #4153A2", ++"o c #4153A3", ++"O c #4D5EA8", ++"+ c #4D5EA9", ++"@ c #4E5EA9", ++"# c #5A69AD", ++"$ c #5A69AE", ++"% c #5A69AF", ++"& c #5A6AAD", ++"* c #6675B3", ++"= c #6775B2", ++"- c #6775B3", ++"; c #6776B3", ++": c #6775B5", ++"> c #6776B4", ++", c #7480B7", ++"< c #7380B8", ++"1 c #7381B8", ++"2 c #7380BA", ++"3 c #7481B8", ++"4 c #D12124", ++"5 c #D42F31", ++"6 c #D73D3E", ++"7 c #D94A4A", ++"8 c #D94A4B", ++"9 c #DA4A4A", ++"0 c #DA4B4B", ++"q c #DC5957", ++"w c #DC5858", ++"e c #DD5859", ++"r c #DD5959", ++"t c #DF6664", ++"y c #E06664", ++"u c #E27471", ++"i c #E27570", ++"p c #E27472", ++"a c #E37472", ++"s c #E27574", ++"d c #E37474", ++"f c #E5827E", ++"g c #E5837F", ++"h c #818BBD", ++"j c #818CBC", ++"k c #808CBE", ++"l c #818DC0", ++"z c #8D97C2", ++"x c #8D97C3", ++"c c #8D97C5", ++"v c #8E98C5", ++"b c #99A2C7", ++"n c #99A3C7", ++"m c #99A2C8", ++"M c #99A3C8", ++"N c #99A2CA", ++"B c #99A2CB", ++"V c #A6AECD", ++"C c #A5AED2", ++"Z c #A6AED0", ++"A c #B3B9D3", ++"S c #B2B9D4", ++"D c #B3B9D5", ++"F c #B2B9D6", ++"G c #B3BAD5", ++"H c #BFC4D6", ++"J c #BFC5D7", ++"K c #BFC5D8", ++"L c #BFC5D9", ++"P c #BFC5DA", ++"I c #BFC5DB", ++"U c #BFC5DC", ++"Y c #BFC5DD", ++"T c #E58381", ++"R c #E68380", ++"E c #E88F89", ++"W c #E88F8B", ++"Q c #E8908A", ++"! c #EB9D99", ++"~ c #EB9D9A", ++"^ c #EB9D9D", ++"/ c #EDABA2", ++"( c #EEABA6", ++") c #EDACA6", ++"_ c #F0B9B1", ++"` c #F1B9B1", ++"' c #F0B9B6", ++"] c #F3C7BD", ++"[ c #CCD0DA", ++"{ c #CCD1DA", ++"} c #CDD0DA", ++"| c #CCD0DC", ++" . c #CCD0DD", ++".. c #CCD1DD", ++"X. c #CCD0DE", ++"o. c #CCD1DE", ++"O. c #CCD1DF", ++"+. c #D8DBDF", ++"@. c #CCD0E0", ++"#. c #D8DCE1", ++"$. c #D9DCE1", ++"%. c #D9DCE3", ++"&. c #D9DCE4", ++"*. c #D9DCE5", ++"=. c #D9DCE6", ++"-. c #D9DCE7", ++";. c #D9DCE8", ++":. c #F6D5CB", ++">. c #F6D5CD", ++",. c #F6D5D3", ++"<. c #F9E3D7", ++"1. c #F9E3DA", ++"2. c #F9E3DB", ++"3. c #E6E7E7", ++"4. c #E5E7E9", ++"5. c #E5E7EC", ++"6. c #E5E8E8", ++"7. c #E6E8E9", ++"8. c #E6E8EA", ++"9. c #E6E8EB", ++"0. c #E5E8EC", ++"q. c #E6E8EC", ++"w. c #F9E3E1", ++"e. c #F2F3EC", ++"r. c #FCF0E1", ++"t. c #FCF0E3", ++"y. c #FCF1E5", ++"u. c #FCF0E7", ++"i. c #FCF1E6", ++"p. c #FCF1E7", ++"a. c #FCF1E8", ++"s. c #FFFEED", ++"d. c #FFFEEE", ++"f. c #FFFEEF", ++"g. c #F2F3F0", ++"h. c #F2F3F1", ++"j. c #F2F3F2", ++"k. c #F2F3F4", ++"l. c #FFFEF0", ++"z. c #FFFEF1", ++"x. c #FFFEF2", ++"c. c #FFFEF3", ++"v. c #FFFFF3", ++"b. c #FFFFF4", ++"n. c #FFFFF5", ++"m. c #FFFFF6", ++"M. c #FFFFF7", ++"N. c #FFFFF8", ++"B. c #FFFFF9", ++"V. c #FFFFFA", ++"C. c #FFFFFB", ++"Z. c #FFFFFC", ++"A. c #FFFFFD", ++"S. c #FFFFFE", ++"D. c gray100", ++"F. c None", ++/* pixels */ ++"F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.w.~ F.F.F.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.r F.F.F.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.' q F.F.F.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.M.q u F.F.F.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.F.F.F.F.M.M.! 4 1.F.F.F.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.F.F.F.M.M.f 4 E M.M.F.w.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.F.F.M.i.t 4 i x.1.g u ,.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.F.M.>.7 4 f r.f 5 ! 
C.F.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.F.1.5 4 / r.7 5 >.M.F.F.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.M.q 4 E d.i 4 >.M.F.F.F.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.M.4 5 d.d.4 6 x.x.F.F.F.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.F.7 7 d.d.4 4 :.M.F.F.F.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.M./ 4 <.x.q 4 7 i.F.F.F.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.F.M.u t x.] 4 4 ) M.F.F.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.F.F.u.7 _ x.E 4 ! M.M.F.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.h.@.4.M.i.u i._ 5 i.M.F.-.N : c @.M.F.F.F.F.F.", ++"F.F.F.F.F.F.k.B O h P x.x.M.x.i.a :.x.M.9.L @.L O c M.F.F.F.F.", ++"F.F.F.F.F.F.> X c n b n n n n h = & X v M.M.M.8. M.F.F.F.F.", ++"F.F.F.F.F.F.F.+...N n n n N N n J ..4.M.M.M.x.M.M. M.F.F.F.F.", ++"F.F.F.F.F.F.F.M.+.O ..x.d.x.d.x.x.3.L 8.M.M.M.x. . X M.F.F.F.F.", ++"F.F.F.F.F.F.F.M.F X = = = = O 8.M.M.h.& S M.F.F.F.F.", ++"F.F.F.F.F.F.F.M.M...z = = = = = j V %.x.F.M.9.% S M.F.F.F.F.F.", ++"F.F.F.F.F.F.F.M.M.4.n d.d.d.d.x.d.x.M.M.M.S X l 5.F.M.F.F.F.F.F.", ++"F.F.F.F.F.F.M.M.x.= O = = = = o = -.-.S h.M.F.M.F.F.F.F.F.F.", ++"F.F.F.F.F.M.-.{ { %., , A x.M.M.M.M.M.F.F.F.F.F.F.F.", ++"F.F.F.F.F O , J e.x.d.d.{ { { +.x.x.x.M.M.M.M.M.=.Y F.F.F.F.F.F.", ++"F.F.F.F. O z H %.d.x.x.d.d.d.d.x.x.x.x...K N O C C.F.F.F.F.F.", ++"F.F.F.F.N O # = = = = = O O L C.F.F.F.F.F.F.", ++"F.F.F.F.M.M.%.V > & O = N =.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.M.M.M.M.x.x.M.M.M.M.M.M.M.M.M.F.F.F.F.F.F.F.F.F.F.", ++"F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F.F." ++}; diff --cc debian/sun_java_app.xpm index 000000000,000000000..7a0f9414e new file mode 100644 --- /dev/null +++ b/debian/sun_java_app.xpm @@@ -1,0 -1,0 +1,199 @@@ ++/* XPM */ ++static char * dukejre_xpm[] = { ++"32 32 164 2", ++" c None", ++". c #000000", ++"+ c #2D2D2D", ++"@ c #363636", ++"# c #313131", ++"$ c #3E3E3E", ++"% c #3D3D3D", ++"& c #2B2B2B", ++"* c #444444", ++"= c #494949", ++"- c #373737", ++"; c #1C1C1C", ++"> c #545454", ++", c #434343", ++"' c #272727", ++") c #0B0B0B", ++"! c #414141", ++"~ c #5C5C5C", ++"{ c #4F4F4F", ++"] c #141414", ++"^ c #3A3A3A", ++"/ c #585858", ++"( c #403A3A", ++"_ c #32201F", ++": c #090202", ++"< c #4F4E4E", ++"[ c #833D3E", ++"} c #BE3432", ++"| c #D63736", ++"1 c #79201F", ++"2 c #040101", ++"3 c #251313", ++"4 c #212121", ++"5 c #9A5857", ++"6 c #EBB9B9", ++"7 c #E03A38", ++"8 c #CA3430", ++"9 c #862221", ++"0 c #251211", ++"a c #774C35", ++"b c #CC5213", ++"c c #A25151", ++"d c #B23D3C", ++"e c #AC2B29", ++"f c #8F2422", ++"g c #691A19", ++"h c #321D1E", ++"i c #7E3D20", ++"j c #D55A0D", ++"k c #8E3F10", ++"l c #250F0F", ++"m c #5D1615", ++"n c #631817", ++"o c #661918", ++"p c #501313", ++"q c #200909", ++"r c #3D3A39", ++"s c #6D341B", ++"t c #AE4F14", ++"u c #CC5515", ++"v c #44403F", ++"w c #381414", ++"x c #310C0C", ++"y c #220908", ++"z c #1F0B0B", ++"A c #413939", ++"B c #525252", ++"C c #9B6343", ++"D c #D96319", ++"E c #D3A88C", ++"F c #D7D7D7", ++"G c #868383", ++"H c #797777", ++"I c #898989", ++"J c #A3A3A3", ++"K c #DBDBDB", ++"L c #E27626", ++"M c #DE6F21", ++"N c #E9E8E7", ++"O c #EDEDED", ++"P c #F8F8F8", ++"Q c #FFFFFF", ++"R c #EC7A20", ++"S c #E27728", ++"T c #ED7C21", ++"U c #E5E1DE", ++"V c #EEEEEE", ++"W c #F6F6F6", ++"X c #FDFDFD", ++"Y c #B76D3E", ++"Z c #E5711D", ++"` c #F0851E", ++" . c #ED7D24", ++".. c #F18B1D", ++"+. c #C75810", ++"@. c #CC5616", ++"#. c #E36F1D", ++"$. c #F2911A", ++"%. c #FBFBFB", ++"&. c #F9F6F2", ++"*. c #F9AA14", ++"=. c #F08A1D", ++"-. c #D35E13", ++";. 
c #E16D1C", ++">. c #F49B18", ++",. c #B9B9B9", ++"'. c #FAFAFA", ++"). c #F49818", ++"!. c #E3701D", ++"~. c #A1480F", ++"{. c #E06B1C", ++"]. c #FCFCFC", ++"^. c #E26D1C", ++"/. c #0F0F0F", ++"(. c #DE691B", ++"_. c #F6A016", ++":. c #E97E1C", ++"<. c #F7A117", ++"[. c #EB7A20", ++"}. c #E4701D", ++"|. c #837B77", ++"1. c #A0938A", ++"2. c #717171", ++"3. c #959595", ++"4. c #E1A27A", ++"5. c #DB844B", ++"6. c #D6671F", ++"7. c #D56318", ++"8. c #D6671B", ++"9. c #DB6616", ++"0. c #DE6614", ++"a. c #D96B1C", ++"b. c #D66A21", ++"c. c #D7996D", ++"d. c #C0BFBF", ++"e. c #393939", ++"f. c #686868", ++"g. c #8F8F8F", ++"h. c #D3D3D3", ++"i. c #D1D1D1", ++"j. c #CFCFCF", ++"k. c #DCDCDC", ++"l. c #D8D8D8", ++"m. c #C0C0C0", ++"n. c #C3C3C3", ++"o. c #333333", ++"p. c #696969", ++"q. c #606060", ++"r. c #E6E6E6", ++"s. c #A0A0A0", ++"t. c #8A8A8A", ++"u. c #5F5F5F", ++"v. c #8C8C8C", ++"w. c #B6B6B6", ++"x. c #BDBDBD", ++"y. c #262626", ++"z. c #7C7C7C", ++"A. c #C2C2C2", ++"B. c #C1C1C1", ++"C. c #B5B5B5", ++"D. c #6E6E6E", ++"E. c #868686", ++"F. c #ABABAB", ++"G. c #292929", ++" ", ++" . . ", ++" . . . . ", ++" . . . . ", ++" + . . . . . . ", ++" @ # . . . . . . . ", ++" $ % & . . . . . . ", ++" * = - ; . . . . . ", ++" * > , ' ) . . . . . . ", ++" ! ~ { # ] . . . . ", ++" ^ / / ( _ : . . . ", ++" # < [ } | 1 2 . ", ++" 3 4 5 6 7 8 9 0 a b . ", ++" 3 3 c d e f g h i j . ", ++" k l m n o p q r s j . ", ++" t u v w x y z A B C j . ", ++" $ . D E F G H I J K j L . ", ++" $ . M j N O P Q Q Q j R j ", ++" $ $ . S T j U V W X j T T j Y ", ++" $ . . Z ` T j j j j j ...T j +. ", ++" . . @.#.$.%.%.%.%.%.%.&.*.=.j -. ", ++" . . @.;.>.%.%.,.. '.%.%.*.).!.j ~. ", ++" . . @.{.*.%.%.%.. ].].%.*.*.^.j j ", ++" /.. j (.*.%.'.%.. '.%.%.*._.^.j j ", ++" . . . j :.%.'.%.%.'.'.%.<.[.}.j |. ", ++" . . # . . j j j T T T T T T T j 1.2. ", ++" . ^ 3.O 4.5.6.7.8.9.0.a.b.c.d.e.f. ", ++" ] . g.O h.i.j.j.h.h.h.k.l.m.n.o.p. ", ++" q.O O r.s.t.$ $ $ u.v.3.w.x.y.u. ", ++" z.A.B.C.D.$ $ $ $ u.E.F.& u. ", ++" # F.F.D.$ $ $ $ ' G.e.u. 
", ++" $ $ $ $ $ $ $ "}; diff --cc debian/swing.properties index 000000000,000000000..0f55df0c5 new file mode 100644 --- /dev/null +++ b/debian/swing.properties @@@ -1,0 -1,0 +1,2 @@@ ++# uncomment to set the default look and feel to GTK ++#swing.defaultlaf=com.sun.java.swing.plaf.gtk.GTKLookAndFeel diff --cc debian/tests/control index 000000000,000000000..53f754f97 new file mode 100644 --- /dev/null +++ b/debian/tests/control @@@ -1,0 -1,0 +1,15 @@@ ++Test-Command: java --version ++Depends: openjdk-17-jre-headless ++Restrictions: superficial ++ ++Test-Command: javac --version ++Depends: openjdk-17-jdk-headless ++Restrictions: superficial ++ ++#Tests: hotspot, jaxp, langtools ++#Depends: @, default-jre-headless, jtreg6:native (>= 6+1-0~), testng:native, build-essential ++#Restrictions: allow-stderr, skippable, flaky ++ ++#Tests: jdk ++#Depends: @, default-jre-headless, jtreg6:native (>= 6+1-0~), testng:native, build-essential, xfwm4:native, xvfb, dbus-x11 ++#Restrictions: allow-stderr, skippable, flaky diff --cc debian/tests/control.in index 000000000,000000000..bbd5fb6db new file mode 100644 --- /dev/null +++ b/debian/tests/control.in @@@ -1,0 -1,0 +1,15 @@@ ++Test-Command: java --version ++Depends: openjdk-17-jre-headless ++Restrictions: superficial ++ ++Test-Command: javac --version ++Depends: openjdk-17-jdk-headless ++Restrictions: superficial ++ ++#Tests: hotspot, jaxp, langtools ++#Depends: @, default-jre-headless, @jtreg_pkg@:native (>= @min_jtreg_version@), testng:native, build-essential ++#Restrictions: allow-stderr, skippable, flaky ++ ++#Tests: jdk ++#Depends: @, default-jre-headless, @jtreg_pkg@:native (>= @min_jtreg_version@), testng:native, build-essential, xfwm4:native, xvfb, dbus-x11 ++#Restrictions: allow-stderr, skippable, flaky diff --cc debian/tests/hotspot index 000000000,000000000..b54f1031a new file mode 100755 --- /dev/null +++ b/debian/tests/hotspot @@@ -1,0 -1,0 +1,23 @@@ ++#!/bin/bash ++set -o errexit ++set -o errtrace ++set -o pipefail ++set -o nounset ++ ++problem_list=${AUTOPKGTEST_TMP}/hotspot-problems.txt ++cat test/hotspot/jtreg/ProblemList.txt > ${problem_list} ++ ++host_arch="${DEB_HOST_ARCH:-$(dpkg --print-architecture)}" ++if [[ -f debian/tests/problems-${host_arch}.txt ]]; then ++ cat debian/tests/problems-${host_arch}.txt >> ${problem_list} ++fi ++ ++native_path=$(pwd)/build/images/test/hotspot/jtreg/native ++debian/tests/jtreg-autopkgtest.sh hotspot \ ++ -exclude:${problem_list} \ ++ -dir:test/hotspot/jtreg \ ++ -nativepath:${native_path} \ ++ -k:!stress \ ++ :tier1 :tier2 ++ ++debian/tests/jtdiff-autopkgtest.sh hotspot diff --cc debian/tests/jaxp index 000000000,000000000..8c01d5ea9 new file mode 100755 --- /dev/null +++ b/debian/tests/jaxp @@@ -1,0 -1,0 +1,12 @@@ ++#!/bin/bash ++set -o errexit ++set -o errtrace ++set -o pipefail ++set -o nounset ++ ++debian/tests/jtreg-autopkgtest.sh jaxp \ ++ -exclude:test/jaxp/ProblemList.txt \ ++ -dir:test/jaxp \ ++ -k:!stress \ ++ :tier1 :tier2 ++debian/tests/jtdiff-autopkgtest.sh jaxp diff --cc debian/tests/jdk index 000000000,000000000..157c29978 new file mode 100755 --- /dev/null +++ b/debian/tests/jdk @@@ -1,0 -1,0 +1,43 @@@ ++#!/bin/bash ++set -o errexit ++set -o errtrace ++set -o pipefail ++set -o nounset ++ ++ ++cleanup() { ++ # kill window manager to clean up (rest will exit automatically) ++ pid="$(jobs -p)" ++ if [ -n "$pid" ]; then ++ xvfbpid="$(pgrep -l -P ${pid} | grep xvfb-run | cut -d' ' -f1)" ++ if [ -n "$xvfbpid" ]; then ++ pgrep -l -P ${xvfbpid} | grep xfwm4 | cut -d' ' -f1 | xargs kill -9 ++ 
fi ++ fi ++} ++ ++for sig in INT QUIT HUP TERM; do trap "cleanup; trap - $sig EXIT; kill -s $sig "'"$$"' "$sig"; done ++trap cleanup EXIT ++ ++export HOME="${AUTOPKGTEST_TMP}" ++export XAUTHORITY="${AUTOPKGTEST_TMP}/.Xauthority" ++export DISPLAY=:10 ++debian/tests/start-xvfb.sh 10 & ++sleep 3 ++ ++problem_list=${AUTOPKGTEST_TMP}/jdk-problems.txt ++cat test/jdk/ProblemList.txt > ${problem_list} ++ ++host_arch="${DEB_HOST_ARCH:-$(dpkg --print-architecture)}" ++if [[ -f debian/tests/problems-${host_arch}.txt ]]; then ++ cat debian/tests/problems-${host_arch}.txt >> ${problem_list} ++fi ++ ++native_path=$(pwd)/build/images/test/jdk/jtreg/native ++debian/tests/jtreg-autopkgtest.sh jdk \ ++ -exclude:${problem_list} \ ++ -dir:test/jdk \ ++ -nativepath:${native_path} \ ++ -k:!stress \ ++ :tier1 :tier2 ++debian/tests/jtdiff-autopkgtest.sh jdk diff --cc debian/tests/jtdiff-autopkgtest.in index 000000000,000000000..9f076f5e2 new file mode 100644 --- /dev/null +++ b/debian/tests/jtdiff-autopkgtest.in @@@ -1,0 -1,0 +1,58 @@@ ++#!/bin/bash ++set -o errexit ++set -o errtrace ++set -o pipefail ++set -o nounset ++ ++testsuite=$1 ++shift ++ ++if [ -z "${AUTOPKGTEST_TMP+x}" ] || [ -z "${AUTOPKGTEST_ARTIFACTS+x}" ]; then ++ echo "Environment variables AUTOPKGTEST_TMP and AUTOPKGTEST_ARTIFACTS must be set" >&2 ++ exit 1 ++fi ++ ++host_arch=${DEB_HOST_ARCH:-$(dpkg --print-architecture)} ++ ++if grep -q -w "${host_arch}" debian/tests/hotspot-archs; then ++ default_vm=hotspot ++else ++ default_vm=zero ++fi ++ ++vmname=${VMNAME:-${default_vm}} ++ ++jt_report_tb="@doc_dir@/test-${host_arch}/jtreport-${vmname}.tar.gz" ++ ++if [ ! -f "${jt_report_tb}" ]; then ++ echo "Unable to compare jtreg results: no build jtreport found for ${vmname}/${host_arch}." ++ echo "Reason: '${jt_report_tb}' does not exist." 
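++  # with autopkgtest's "skippable" restriction, exit status 77 marks the
++  # whole test as skipped rather than failed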
++  exit 77
++fi
++
++# create directories to hold the results
++mkdir -p "${AUTOPKGTEST_ARTIFACTS}/${testsuite}"
++mkdir -p "${AUTOPKGTEST_TMP}/openjdk-pkg-jtreg-report"
++
++current_report_dir="${AUTOPKGTEST_ARTIFACTS}/${testsuite}"
++previous_report_dir="${AUTOPKGTEST_TMP}/openjdk-pkg-jtreg-report/${testsuite}"
++
++# extract the testsuite results shipped in the openjdk package
++[ -d "${previous_report_dir}" ] || \
++  tar -xf "${jt_report_tb}" --strip-components=1 -C "${AUTOPKGTEST_TMP}/openjdk-pkg-jtreg-report"
++
++
++jtdiff -o "${current_report_dir}/jtdiff.html" "${previous_report_dir}/JTreport" "${current_report_dir}/JTreport" || true
++jtdiff "${previous_report_dir}/JTreport" "${current_report_dir}/JTreport" | tee "${current_report_dir}/jtdiff.txt" || true
++
++# create the jtdiff super-diff structure
++jtdiff_dir="${AUTOPKGTEST_TMP}/jtdiff-${testsuite}/${host_arch}"
++mkdir -p "${jtdiff_dir}/"{1,2} "${current_report_dir}/jtdiff-super"
++ln -sf "${previous_report_dir}/"[0-9] "${jtdiff_dir}/1/"
++ln -sf "${current_report_dir}/"[0-9] "${jtdiff_dir}/2/"
++
++# run the jtdiff super-diff
++jtdiff -o "${current_report_dir}/jtdiff-super/" -s "${AUTOPKGTEST_TMP}/jtdiff-${testsuite}/" || true
++
++# fail if we detect a regression
++if grep -E '^(pass|---) +(fail|error)' "${current_report_dir}/jtdiff.txt"; then exit 1; else exit 0; fi
diff --cc debian/tests/jtdiff-autopkgtest.sh
index 000000000,000000000..8586d7a7e
new file mode 100755
--- /dev/null
+++ b/debian/tests/jtdiff-autopkgtest.sh
@@@ -1,0 -1,0 +1,58 @@@
++#!/bin/bash
++set -o errexit
++set -o errtrace
++set -o pipefail
++set -o nounset
++
++testsuite=$1
++shift
++
++if [ -z "${AUTOPKGTEST_TMP+x}" ] || [ -z "${AUTOPKGTEST_ARTIFACTS+x}" ]; then
++  echo "Environment variables AUTOPKGTEST_TMP and AUTOPKGTEST_ARTIFACTS must be set" >&2
++  exit 1
++fi
++
++host_arch=${DEB_HOST_ARCH:-$(dpkg --print-architecture)}
++
++if grep -q -w "${host_arch}" debian/tests/hotspot-archs; then
++  default_vm=hotspot
++else
++  default_vm=zero
++fi
++
++vmname=${VMNAME:-${default_vm}}
++
++jt_report_tb="/usr/share/doc/openjdk-17-jre-headless//test-${host_arch}/jtreport-${vmname}.tar.gz"
++
++if [ ! -f "${jt_report_tb}" ]; then
++  echo "Unable to compare jtreg results: no build jtreport found for ${vmname}/${host_arch}."
++  echo "Reason: '${jt_report_tb}' does not exist."
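++  # with autopkgtest's "skippable" restriction, exit status 77 marks the
++  # whole test as skipped rather than failed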
++  exit 77
++fi
++
++# create directories to hold the results
++mkdir -p "${AUTOPKGTEST_ARTIFACTS}/${testsuite}"
++mkdir -p "${AUTOPKGTEST_TMP}/openjdk-pkg-jtreg-report"
++
++current_report_dir="${AUTOPKGTEST_ARTIFACTS}/${testsuite}"
++previous_report_dir="${AUTOPKGTEST_TMP}/openjdk-pkg-jtreg-report/${testsuite}"
++
++# extract the testsuite results shipped in the openjdk package
++[ -d "${previous_report_dir}" ] || \
++  tar -xf "${jt_report_tb}" --strip-components=1 -C "${AUTOPKGTEST_TMP}/openjdk-pkg-jtreg-report"
++
++
++jtdiff -o "${current_report_dir}/jtdiff.html" "${previous_report_dir}/JTreport" "${current_report_dir}/JTreport" || true
++jtdiff "${previous_report_dir}/JTreport" "${current_report_dir}/JTreport" | tee "${current_report_dir}/jtdiff.txt" || true
++
++# create the jtdiff super-diff structure
++jtdiff_dir="${AUTOPKGTEST_TMP}/jtdiff-${testsuite}/${host_arch}"
++mkdir -p "${jtdiff_dir}/"{1,2} "${current_report_dir}/jtdiff-super"
++ln -sf "${previous_report_dir}/"[0-9] "${jtdiff_dir}/1/"
++ln -sf "${current_report_dir}/"[0-9] "${jtdiff_dir}/2/"
++
++# run the jtdiff super-diff
++jtdiff -o "${current_report_dir}/jtdiff-super/" -s "${AUTOPKGTEST_TMP}/jtdiff-${testsuite}/" || true
++
++# fail if we detect a regression
++if grep -E '^(pass|---) +(fail|error)' "${current_report_dir}/jtdiff.txt"; then exit 1; else exit 0; fi
diff --cc debian/tests/jtreg-autopkgtest.in
index 000000000,000000000..4b62d12d3
new file mode 100755
--- /dev/null
+++ b/debian/tests/jtreg-autopkgtest.in
@@@ -1,0 -1,0 +1,140 @@@
++#!/bin/bash
++set -o errexit
++set -o errtrace
++set -o pipefail
++set -o nounset
++
++testsuite=$1
++shift
++
++if [ -z "${AUTOPKGTEST_TMP+x}" ] || [ -z "${AUTOPKGTEST_ARTIFACTS+x}" ]; then
++  echo "Environment variables AUTOPKGTEST_TMP and AUTOPKGTEST_ARTIFACTS must be set" >&2
++  exit 1
++fi
++
++host_arch="${DEB_HOST_ARCH:-$(dpkg --print-architecture)}"
++
++if [ -z "${JDK_TO_TEST+x}" ]; then
++  JDK_TO_TEST=$(echo @JDK_DIR@ | sed "s/-[^-]*$/-$host_arch/")
++fi
++
++if [ -z "${BOOTJDK_HOME+x}" ]; then
++  BOOTJDK_HOME=${JDK_TO_TEST}
++fi
++
++jtreg_version="$(dpkg-query -W @jtreg_pkg@ | cut -f2)"
++
++# set additional jtreg options
++jt_options="${JTREG_OPTIONS:-}"
++if [[ "${host_arch}" == "armel" ]]; then
++  jt_options+=" -Xmx256M"
++fi
++if dpkg --compare-versions ${jtreg_version} ge 4.2; then
++  jt_options+=" -conc:auto"
++fi
++
++# check java binary
++if [ ! -x "${JDK_TO_TEST}/bin/java" ]; then
++  echo "Error: '${JDK_TO_TEST}/bin/java' is not an executable." >&2
++  exit 1
++fi
++
++# restrict the tests to a few archs (set from debian/rules)
++if ! echo "${host_arch}" | grep -qE "^($(echo @jtreg_archs@ | tr ' ' '|'))$"; then
++  echo "Skipping: ${host_arch} is not on the jtreg_archs list."
++  exit 77
++fi
++
++jtreg_processes() {
++  ps x -ww -o pid,ppid,args \
++    | awk '$2 == 1 && $3 ~ /^\/scratch/' \
++    | sed "s,${JDK_TO_TEST},,g;s,$(pwd),,g"
++}
++
++jtreg_pids() {
++  ps x --no-headers -ww -o pid,ppid,args \
++    | awk "\$2 == 1 && \$3 ~ /^${JDK_TO_TEST//\//\\/}/ {print \$1}"
++}
++
++cleanup() {
++  # kill testsuite processes still hanging
++  pids="$(jtreg_pids)"
++  if [ -n "$pids" ]; then
++    echo "[$0] killing processes..."
++    jtreg_processes
++    kill -1 $pids
++    sleep 2
++    pids="$(jtreg_pids)"
++    if [ -n "$pids" ]; then
++      echo "[$0] try harder..."
++      jtreg_processes
++      kill -9 $pids
++      sleep 2
++    fi
++  else
++    echo "[$0] nothing to clean up"
++  fi
++  pids="$(jtreg_pids)"
++  if [ -n "$pids" ]; then
++    echo "[$0] leftover processes..."
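++    # at this point both SIGHUP and SIGKILL have been tried; just list
++    # whatever still survives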
++    jtreg_processes
++  fi
++}
++
++trap "cleanup" EXIT INT TERM ERR
++
++jtwork_dir="${AUTOPKGTEST_TMP}/${testsuite}/JTwork"
++output_dir="${AUTOPKGTEST_ARTIFACTS}/${testsuite}/"
++
++# run the tests, retrying those with "fail" or "error" status at most once
++for i in 0 1; do
++  # save each try under its own folder to preserve history
++  report_path="${i}/JTreport"
++  report_dir="${output_dir}/${report_path}"
++  jtreg ${jt_options} \
++    -vmoption:-Djtreg.home=/usr/share/jtreg \
++    -verbose:summary \
++    -automatic \
++    -retain:none \
++    -ignore:quiet \
++    -agentvm \
++    -timeout:5 \
++    -workDir:"${jtwork_dir}" \
++    -reportDir:"${report_dir}" \
++    -jdk:${JDK_TO_TEST} \
++    -vmoption:-Dtest.boot.jdk=${BOOTJDK_HOME} \
++    -vmoption:-XX:MaxRAMPercentage=25 \
++    ${on_retry:-} "$@" \
++    && exit_code=0 || exit_code=$?
++
++  # copy .jtr files from failed tests for later debugging
++  find "${jtwork_dir}" -name '*.jtr' -exec grep -Eq '^execStatus=[^Pass]' {} \; -printf "%P\n" \
++    | while IFS= read -r jtr; do
++      jtr_dir=$(dirname "${output_dir}/JTwork/${jtr}")
++      mkdir -p "${jtr_dir}"
++      cp --update --preserve --backup=numbered "${jtwork_dir}/${jtr}" "${output_dir}/JTwork/${jtr}"
++      # copy all generated hs_err_pid logs into the jtr's directory to ease debugging
++      if grep -qhE 'hs_err_pid[0-9]+\.log' "${output_dir}/JTwork/${jtr}"; then
++        grep -hEo '/[^ ]*/hs_err_pid[0-9]+\.log' "${output_dir}/JTwork/${jtr}" \
++          | xargs cp --update --preserve --backup=numbered -t "${jtr_dir}" \
++          || echo "Warning: unable to find hs_err log for ${jtr}"
++      fi
++    done
++
++  # break if jtdiff reports no difference from the previous run
++  # (also deletes the just-created JTreport)
++  # DISABLED: don't use it for now, as flaky tests could still pass given more retries
++  #jtdiff "${output_dir}/JTreport" "$report_dir" >/dev/null 2>&1 \
++  #  && rm -rf "${report_dir}" && break
++
++  # link the latest JTreport to output_dir
++  ln -sf -t "${output_dir}" "${report_path}"
++
++  # if all tests passed there is no need for a retry
++  if [ "x${exit_code}" == "x0" ]; then break; fi
++
++  # only retry tests with fail/error status
++  on_retry="-status:fail,error"
++done
++
++exit $exit_code
diff --cc debian/tests/jtreg-autopkgtest.sh
index 000000000,000000000..8853da495
new file mode 100755
--- /dev/null
+++ b/debian/tests/jtreg-autopkgtest.sh
@@@ -1,0 -1,0 +1,146 @@@
++#!/bin/bash
++set -o errexit
++set -o errtrace
++set -o pipefail
++set -o nounset
++
++testsuite=$1
++shift
++
++if [ -z "${AUTOPKGTEST_TMP+x}" ] || [ -z "${AUTOPKGTEST_ARTIFACTS+x}" ]; then
++  echo "Environment variables AUTOPKGTEST_TMP and AUTOPKGTEST_ARTIFACTS must be set" >&2
++  exit 1
++fi
++
++host_arch="${DEB_HOST_ARCH:-$(dpkg --print-architecture)}"
++
++if [ -z "${JDK_TO_TEST+x}" ]; then
++  JDK_TO_TEST=$(echo /usr/lib/jvm/java-17-openjdk-amd64 | sed "s/-[^-]*$/-$host_arch/")
++fi
++
++if [ -z "${BOOTJDK_HOME+x}" ]; then
++  BOOTJDK_HOME=${JDK_TO_TEST}
++fi
++
++jtreg_version="$(dpkg-query -W jtreg6 | cut -f2)"
++
++# set additional jtreg options
++jt_options="${JTREG_OPTIONS:-}"
++if [[ "${host_arch}" == "armel" ]]; then
++  jt_options+=" -Xmx256M"
++fi
++if dpkg --compare-versions ${jtreg_version} ge 4.2; then
++  jt_options+=" -conc:auto"
++fi
++
++# check java binary
++if [ ! -x "${JDK_TO_TEST}/bin/java" ]; then
++  echo "Error: '${JDK_TO_TEST}/bin/java' is not an executable." >&2
++  exit 1
++fi
++
++if [ ! -x "${BOOTJDK_HOME}/bin/java" ]; then
++  echo "Error: '${BOOTJDK_HOME}/bin/java' is not an executable." >&2
++  exit 1
++fi
++
++# restrict the tests to a few archs (set from debian/rules)
++if ! echo "${host_arch}" | grep -qE "^($(echo amd64 i386 arm64 armhf ppc64 ppc64el s390x alpha ia64 powerpc ppc64 sh4 x32 armel mipsel mips64el riscv64 | tr ' ' '|'))$"; then
++  echo "Skipping: ${host_arch} is not on the jtreg_archs list."
++  exit 77
++fi
++
++jtreg_processes() {
++  ps x -ww -o pid,ppid,args \
++    | awk '$2 == 1 && $3 ~ /^\/scratch/' \
++    | sed "s,${JDK_TO_TEST},,g;s,$(pwd),,g"
++}
++
++jtreg_pids() {
++  ps x --no-headers -ww -o pid,ppid,args \
++    | awk "\$2 == 1 && \$3 ~ /^${JDK_TO_TEST//\//\\/}/ {print \$1}"
++}
++
++cleanup() {
++  # kill testsuite processes still hanging
++  pids="$(jtreg_pids)"
++  if [ -n "$pids" ]; then
++    echo "[$0] killing processes..."
++    jtreg_processes
++    kill -1 $pids
++    sleep 2
++    pids="$(jtreg_pids)"
++    if [ -n "$pids" ]; then
++      echo "[$0] try harder..."
++      jtreg_processes
++      kill -9 $pids
++      sleep 2
++    fi
++  else
++    echo "[$0] nothing to clean up"
++  fi
++  pids="$(jtreg_pids)"
++  if [ -n "$pids" ]; then
++    echo "[$0] leftover processes..."
++    jtreg_processes
++  fi
++}
++
++trap "cleanup" EXIT INT TERM ERR
++
++jtwork_dir="${AUTOPKGTEST_TMP}/${testsuite}/JTwork"
++output_dir="${AUTOPKGTEST_ARTIFACTS}/${testsuite}/"
++
++# run the tests, retrying those with "fail" or "error" status at most once
++for i in 0 1; do
++  # save each try under its own folder to preserve history
++  report_path="${i}/JTreport"
++  report_dir="${output_dir}/${report_path}"
++  # see make/RunTests.gmk for a set of good options
++  jtreg ${jt_options} \
++    -J-Djtreg.home=/usr/share/jtreg \
++    -verbose:summary \
++    -automatic \
++    -retain:none \
++    -ignore:quiet \
++    -agentvm \
++    -timeout:5 \
++    -workDir:"${jtwork_dir}" \
++    -reportDir:"${report_dir}" \
++    -jdk:${JDK_TO_TEST} \
++    -vmoption:-Dtest.boot.jdk=${BOOTJDK_HOME} \
++    -vmoption:-XX:MaxRAMPercentage=25 \
++    ${on_retry:-} "$@" \
++    && exit_code=0 || exit_code=$?
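++  # note: the "cmd && a || b" construct above captures jtreg's exit status
++  # without tripping errexit when some tests fail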
++
++  # copy .jtr files from failed tests for later debugging
++  find "${jtwork_dir}" -name '*.jtr' -exec grep -Eq '^execStatus=[^Pass]' {} \; -printf "%P\n" \
++    | while IFS= read -r jtr; do
++      jtr_dir=$(dirname "${output_dir}/JTwork/${jtr}")
++      mkdir -p "${jtr_dir}"
++      cp --update --preserve --backup=numbered "${jtwork_dir}/${jtr}" "${output_dir}/JTwork/${jtr}"
++      # copy all generated hs_err_pid logs into the jtr's directory to ease debugging
++      if grep -qhE 'hs_err_pid[0-9]+\.log' "${output_dir}/JTwork/${jtr}"; then
++        grep -hEo '/[^ ]*/hs_err_pid[0-9]+\.log' "${output_dir}/JTwork/${jtr}" \
++          | xargs cp --update --preserve --backup=numbered -t "${jtr_dir}" \
++          || echo "Warning: unable to find hs_err log for ${jtr}"
++      fi
++    done
++
++  # break if jtdiff reports no difference from the previous run
++  # (also deletes the just-created JTreport)
++  # DISABLED: don't use it for now, as flaky tests could still pass given more retries
++  #jtdiff "${output_dir}/JTreport" "$report_dir" >/dev/null 2>&1 \
++  #  && rm -rf "${report_dir}" && break
++
++  # link the latest JTreport to output_dir
++  ln -sf -t "${output_dir}" "${report_path}"
++
++  # if all tests passed there is no need for a retry
++  if [ "x${exit_code}" == "x0" ]; then break; fi
++
++  # only retry tests with fail/error status
++  on_retry="-status:fail,error"
++done
++
++exit $exit_code
diff --cc debian/tests/langtools
index 000000000,000000000..c37b1987c
new file mode 100755
--- /dev/null
+++ b/debian/tests/langtools
@@@ -1,0 -1,0 +1,12 @@@
++#!/bin/bash
++set -o errexit
++set -o errtrace
++set -o pipefail
++set -o nounset
++
++debian/tests/jtreg-autopkgtest.sh langtools \
++  -exclude:test/langtools/ProblemList.txt \
++  -dir:test/langtools \
++  -k:!stress \
++  :tier1 :tier2
++debian/tests/jtdiff-autopkgtest.sh langtools
diff --cc debian/tests/problems-armhf.txt
index 000000000,000000000..f0ba6e7cf
new file mode 100644
--- /dev/null
+++ b/debian/tests/problems-armhf.txt
@@@ -1,0 -1,0 +1,26 @@@
++
++# :hotspot
++
++# exclude test pending investigation
++# see https://launchpadlibrarian.net/658682532/buildlog_ubuntu-lunar-armhf.openjdk-20_20+36~us1-0ubuntu1~ppa22_BUILDING.txt.gz
++# crash log was present.
++
++runtime/ErrorHandling/MachCodeFramesInErrorFile.java 000000 generic-all
++
++# On armhf the VM's code cache memory bean for profiled methods is not present.
++# This causes an NPE in the test.
++# The patch cannot be sent upstream as it requires further investigation.
++
++compiler/codecache/MHIntrinsicAllocFailureTest.java
++
++# : jdk
++
++# these tests pass but are very slow in CI, causing timeouts
++
++jdk/sun/security/rsa/SignedObjectChain.java 000000 generic-all
++sun/security/ec/ed/EdDSATest.java 000000 generic-all
++java/security/SignedObject/Chain.java 000000 generic-all
++
++# JDK-8311092 - native stack not implemented for armhf, disable test
++runtime/jni/nativeStack/TestNativeStack.java 8311092 generic-all
++
diff --cc debian/tests/problems-i386.txt
index 000000000,000000000..01f2c91b2
new file mode 100644
--- /dev/null
+++ b/debian/tests/problems-i386.txt
@@@ -1,0 -1,0 +1,16 @@@
++
++# quarantine i386 tests pending investigation
++
++# : hotspot
++
++compiler/arguments/CheckCICompilerCount.java 000000 generic-all
++gtest/MetaspaceGtests.java#reclaim-none-ndebug 000000 generic-all
++gtest/MetaspaceGtests.java#reclaim-aggressive-ndebug 000000 generic-all
++gtest/GTestWrapper.java 000000 generic-all
++
++# : jdk
++
++tools/jlink/JLinkTest.java 000000 generic-all
++
++java/net/httpclient/http2/HpackBinaryTestDriver.java 000000 generic-all
++
diff --cc debian/tests/start-xvfb.sh
index 000000000,000000000..8c5fe4bed
new file mode 100755
--- /dev/null
+++ b/debian/tests/start-xvfb.sh
@@@ -1,0 -1,0 +1,21 @@@
++#!/bin/bash
++set -o errexit
++set -o errtrace
++set -o pipefail
++set -o nounset
++
++if [ -z "${AUTOPKGTEST_TMP+x}" ] || [ -z "${AUTOPKGTEST_ARTIFACTS+x}" ]; then
++  echo "Environment variables AUTOPKGTEST_TMP and AUTOPKGTEST_ARTIFACTS must be set" >&2
++  exit 1
++fi
++export XAUTHORITY="${AUTOPKGTEST_TMP}/.Xauthority"
++export XDG_CACHE_HOME="${AUTOPKGTEST_TMP}"
++export XDG_DATA_HOME="${AUTOPKGTEST_TMP}"
++export XDG_CONFIG_HOME="${AUTOPKGTEST_TMP}"
++export XDG_RUNTIME_DIR=$(mktemp -d)
++
++exec dbus-run-session xvfb-run --server-num=${1:-10} \
++  --error-file="${AUTOPKGTEST_ARTIFACTS}/xvfb-run.log" \
++  --auth-file=${XAUTHORITY} \
++  --server-args="-fbdir ${AUTOPKGTEST_TMP} -pixdepths 8 16 24 32 -extension GLX -screen 0 1600x900x24" \
++  xfwm4
diff --cc debian/update-hgrev.sh
index 000000000,000000000..8df65cf0d
new file mode 100644
--- /dev/null
+++ b/debian/update-hgrev.sh
@@@ -1,0 -1,0 +1,29 @@@
++#!/bin/bash
++
++tarballs=(corba.tar.gz hotspot.tar.gz jaxp.tar.gz jaxws.tar.gz jdk-dfsg.tar.gz langtools-dfsg.tar.gz openjdk.tar.gz)
++varhgchange=(CORBA_CHANGESET HOTSPOT_CHANGESET JAXP_CHANGESET JAXWS_CHANGESET JDK_CHANGESET LANGTOOLS_CHANGESET OPENJDK_CHANGESET)
++tarballdir=.
++
++makefile1=Makefile.am
++
++function update_var() {
++  varname=$1
++  newsum=$2
++
++  echo "$varname: ${newsum}"
++  if [ -f $makefile1 ]; then
++    sed -i "s/\(^$varname\)\(..*$\)/\1 = ${newsum}/" $makefile1
++  fi
++}
++
++# sets $revision to the suffix after the last '-' in the tarball's
++# top-level directory name
++function actual_tar_rev() {
++  tar=$1
++
++  revision=$(tar tf $tar | head -1 | sed 's,/.*,,' | sed 's,.*-,,')
++}
++
++# For all modules
++for (( i = 0 ; i < ${#varhgchange[@]} ; i++ )); do
++  actual_tar_rev $tarballdir/${tarballs[$i]}
++  update_var ${varhgchange[$i]} $revision
++done
diff --cc debian/update-shasum.sh
index 000000000,000000000..af38b9247
new file mode 100644
--- /dev/null
+++ b/debian/update-shasum.sh
@@@ -1,0 -1,0 +1,23 @@@
++#!/bin/bash
++
++tarballs=(jdk-dfsg.tar.gz langtools-dfsg.tar.gz)
++varshasum=(JDK_SHA256SUM LANGTOOLS_SHA256SUM)
++tarballdir=.
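++# the update_var helper below rewrites the matching "VAR = value" line
++# in Makefile.am in place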
++ ++makefile1=Makefile.am ++ ++function update_var() { ++ varname=$1 ++ newsum=$2 ++ ++ echo "$varname: ${newsum}" ++ if [ -f $makefile1 ]; then ++ sed -i "s/\(^$varname\)\(..*$\)/\1 = ${newsum}/" $makefile1 ++ fi ++} ++ ++# For all modules ++for (( i = 0 ; i < ${#tarballs[@]} ; i++ )); do ++ newsum=$(sha256sum $tarballdir/${tarballs[$i]} | cut -f 1 -d ' ') ++ update_var ${varshasum[$i]} $newsum ++done diff --cc debian/watch index 000000000,000000000..f94b7f53e new file mode 100644 --- /dev/null +++ b/debian/watch @@@ -1,0 -1,0 +1,6 @@@ ++version=4 ++opts=\ ++repack,\ ++compression=xz \ ++ https://github.com/openjdk/jdk17u/tags \ ++ (?:.*?/)?jdk-(\d[\d.]*\+\d[\d]*)\.tar\.gz
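++# the pattern above can be checked locally with, e.g.:
++#   uscan --no-download --verbose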