diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 0a6a7ce90..000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,61 +0,0 @@ -# Java Gradle CircleCI 2.0 configuration file -# -# Check https://circleci.com/docs/2.0/language-java/ for more details -# -version: 2.1 -orbs: - codecov: codecov/codecov@3.1.0 - coverage-reporter: codacy/coverage-reporter@13.11.0 - -jobs: - build: - docker: - # specify the JVM version you desire here - - image: circleci/openjdk:11-jdk - - # Specify service dependencies here if necessary - # CircleCI maintains a library of pre-built images - # documented at https://circleci.com/docs/2.0/circleci-images/ - # - image: circleci/postgres:9.4 - - working_directory: ~/repo - - environment: - JVM_OPTS: -Xmx3200m # Customize the JVM maximum heap limit - TERM: dumb - - steps: - - checkout - - # Download and cache dependencies - - restore_cache: - keys: - - v1-dependencies-{{ checksum "build.gradle" }} - # fallback to using the latest cache if no exact match is found - - v1-dependencies- - - - run: gradle dependencies - - - save_cache: - paths: - - ~/.gradle - key: v1-dependencies-{{ checksum "build.gradle" }} - - # run tests! - - run: gradle test --info --stacktrace - - - store_test_results: - # Upload test results for display in Test Summary: https://circleci.com/docs/2.0/collect-test-data/ - path: build/test-results/test - - store_artifacts: # Upload test results for display in Artifacts: https://circleci.com/docs/2.0/artifacts/ - path: build/test-results/test - when: always - - - run: gradle jacocoTestReport - # Upload coverage results to codecov: - - codecov/upload: - file: docs/coverage/test/jacocoTestReport.xml - - coverage-reporter/send_report: - coverage-reports: docs/coverage/test/jacocoTestReport.xml - project-token: 3f617038c92448e5bed998f5662b67ab - diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml new file mode 100644 index 000000000..3c6a4039c --- /dev/null +++ b/.github/workflows/build-and-test.yml @@ -0,0 +1,17 @@ +name: Build And Test on All OSs +on: [push, pull_request] +jobs: + gradle: + strategy: + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + java: [1.8, 11, 17, 21] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v1 + - uses: actions/setup-java@v1 + with: + java-version: ${{ matrix.java }} + - uses: eskatos/gradle-command-action@v1 + with: + arguments: check --scan \ No newline at end of file diff --git a/build.gradle b/build.gradle index d3090f3b0..a6bc375ee 100644 --- a/build.gradle +++ b/build.gradle @@ -44,14 +44,20 @@ plugins { id 'info.solidsoft.pitest' version '1.5.1' // Kotlin tests: - id 'org.jetbrains.kotlin.jvm' version '1.3.31' + id 'org.jetbrains.kotlin.jvm' version '1.7.10' // Public deployment : id 'maven-publish' id 'signing' id('io.github.gradle-nexus.publish-plugin') version '1.1.0' + + // For static analysis and null checking: + id "net.ltgt.errorprone" version "3.1.0" } +// If current version >= 11 we can do static analysis: +boolean WE_CAN_DO_STATIC_ANALYSIS = JavaVersion.current().isJava11Compatible() +println "Static analysis enabled: $WE_CAN_DO_STATIC_ANALYSIS" // This gives some more freedom for comments ( special characters... ). @@ -135,18 +141,28 @@ repositories { // Note: Visit the java-library gradle plugin documentation for more information about api vs implementation. 
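Review note on the static-analysis setup introduced above: the build wires in Error Prone with the NullAway plugin, and further below `NullAway:AnnotatedPackages` is set to the empty string, which (since every package name starts with the empty prefix) effectively treats all packages as annotated — unannotated references then default to non-null. A minimal sketch of what that enforces in practice, assuming the `org.jspecify` dependency added below is on the classpath; the class and method names here are hypothetical, not part of the library:

```java
import org.jspecify.annotations.Nullable;

final class NullAwaySketch {

    // Unannotated parameters are assumed non-null, so callers may not pass null:
    static int strictLength( String s ) { return s.length(); }

    // A @Nullable parameter must be null-checked before it is dereferenced:
    static int lenientLength( @Nullable String s ) {
        // return s.length();                 // <- NullAway would reject this line
        return ( s == null ? 0 : s.length() );
    }

    public static void main( String[] args ) {
        System.out.println( strictLength("ok") );   // prints 2
        System.out.println( lenientLength(null) );  // prints 0
    }
}
```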
dependencies { + if ( WE_CAN_DO_STATIC_ANALYSIS ) { + errorprone "com.uber.nullaway:nullaway:0.10.25" + compileOnly "com.google.code.findbugs:jsr305:3.0.2" + errorprone "com.google.errorprone:error_prone_core:2.27.0" + } + else + errorprone "com.google.errorprone:error_prone_core:2.9.+" + + implementation group: 'com.google.errorprone', name: 'error_prone_annotations', version: '2.27.0' + implementation group: 'org.jspecify', name: 'jspecify', version: '1.0.0' + // Core library dependencies : implementation 'org.jocl:jocl:2.0.5' //-> Internal dependency! // Logging : - implementation group: 'org.slf4j', name: 'slf4j-api', version: '2.0.7' + implementation group: 'org.slf4j', name: 'slf4j-api', version: '[1+, )' // Test suite : Groovy, Spock (+ sl4j, + Spock-Reports) //----------------------------------------------------- // mandatory dependencies for using Spock - testImplementation 'org.codehaus.groovy:groovy:3.0.13' - testImplementation 'org.spockframework:spock-core:2.3-groovy-3.0' - testImplementation 'com.athaydes:spock-reports:2.3.2-groovy-3.0' + testImplementation 'org.spockframework:spock-core:2.3-groovy-4.0' + testImplementation 'com.athaydes:spock-reports:2.5.1-groovy-4.0' //testImplementation group: 'org.apache.groovy', name: 'groovy', version: '4.0.6' //testImplementation 'org.spockframework:spock-core:2.3-groovy-4.0' @@ -156,14 +172,14 @@ dependencies { testRuntimeOnly group: 'org.objenesis', name: 'objenesis', version: '3.2' // allows mocking of classes without default constructor (together with ByteBuddy or CGLIB) // Kotlin tests: - testImplementation 'org.jetbrains.kotlin:kotlin-script-runtime:1.3.31' // first the runtime so that the tests compile + testImplementation 'org.jetbrains.kotlin:kotlin-script-runtime:1.7.10' // first the runtime so that the tests compile testImplementation 'org.jetbrains.kotlin:kotlin-test' // Then some test dependencies to do assertions... testImplementation 'org.assertj:assertj-core:3.23.1' // Same story here! - testImplementation 'org.junit.jupiter:junit-jupiter-api:5.9.0' - testRuntimeOnly 'org.junit.jupiter:junit-jupiter-engine:5.9.0' + testImplementation 'org.junit.jupiter:junit-jupiter-api:5.10.2' + testRuntimeOnly 'org.junit.jupiter:junit-jupiter-engine:5.10.2' - testImplementation 'org.slf4j:slf4j-api:2.0.3' // LOGGING - testImplementation 'org.slf4j:slf4j-simple:2.0.3' + testImplementation 'org.slf4j:slf4j-api:2.0.12' // Logging in tests + testImplementation 'org.slf4j:slf4j-simple:2.0.12' } compileTestKotlin { @@ -180,6 +196,9 @@ test { // Spock-Reports configuration can be found in the test resources: src/test/resources/SpockConfig.Groovy useJUnitPlatform() // ...otherwise spock tests will not be received by gradle... + testLogging { + exceptionFormat = 'full' // So we see spock power assert messages! 
+ } // Memory : minHeapSize = "1g" @@ -189,13 +208,32 @@ test { maxParallelForks = 1//Math.floor(1+(Runtime.runtime.availableProcessors().intdiv(2) ?: 1)/2) } +if ( WE_CAN_DO_STATIC_ANALYSIS ) { + tasks.withType(JavaCompile) { + // remove the if condition if you want to run NullAway on test code + if (!name.toLowerCase().contains("test")) { + options.errorprone { + check("NullAway", net.ltgt.gradle.errorprone.CheckSeverity.ERROR) + option("NullAway:AnnotatedPackages", "") + // We disable the NonCanonicalType because non-canonical type declarations are used for convenience + check("NonCanonicalType", net.ltgt.gradle.errorprone.CheckSeverity.OFF) + } + } else { + options.errorprone { + check("NullAway", net.ltgt.gradle.errorprone.CheckSeverity.WARN) + option("NullAway:AnnotatedPackages", "") + } + } + } +} + //---------------------------------------------------------------------------------------------------------------------- // 8. CONFIGURING TEST REPORTING : jacoco { - toolVersion = "0.8.9" - reportsDir = file("docs/coverage") + toolVersion = "0.8.11" + reportsDirectory.set(file("docs/coverage")) } jacocoTestReport { diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 7454180f2..943f0cbfa 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 774fae876..707e499ac 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,5 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.6.1-bin.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-bin.zip +networkTimeout=10000 zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew index 1b6c78733..65dcd68d6 100755 --- a/gradlew +++ b/gradlew @@ -55,7 +55,7 @@ # Darwin, MinGW, and NonStop. # # (3) This script is generated from the Groovy template -# https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt # within the Gradle project. # # You can find Gradle at https://github.com/gradle/gradle/. @@ -80,10 +80,10 @@ do esac done -APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit - -APP_NAME="Gradle" +# This is normally unused +# shellcheck disable=SC2034 APP_BASE_NAME=${0##*/} +APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' @@ -143,12 +143,16 @@ fi if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then case $MAX_FD in #( max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC3045 MAX_FD=$( ulimit -H -n ) || warn "Could not query maximum file descriptor limit" esac case $MAX_FD in #( '' | soft) :;; #( *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC3045 ulimit -n "$MAX_FD" || warn "Could not set maximum file descriptor limit to $MAX_FD" esac @@ -205,6 +209,12 @@ set -- \ org.gradle.wrapper.GradleWrapperMain \ "$@" +# Stop when "xargs" is not available. +if ! 
command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + # Use "xargs" to parse quoted args. # # With -n1 it outputs one arg per line, with the quotes and backslashes removed. diff --git a/gradlew.bat b/gradlew.bat index 107acd32c..93e3f59f1 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -14,7 +14,7 @@ @rem limitations under the License. @rem -@if "%DEBUG%" == "" @echo off +@if "%DEBUG%"=="" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @@ -25,7 +25,8 @@ if "%OS%"=="Windows_NT" setlocal set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @@ -40,7 +41,7 @@ if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto execute +if %ERRORLEVEL% equ 0 goto execute echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. @@ -75,13 +76,15 @@ set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar :end @rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd +if %ERRORLEVEL% equ 0 goto mainEnd :fail rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% :mainEnd if "%OS%"=="Windows_NT" endlocal diff --git a/src/main/java/neureka/AbstractNda.java b/src/main/java/neureka/AbstractNda.java index a7b3e397e..2b571728c 100644 --- a/src/main/java/neureka/AbstractNda.java +++ b/src/main/java/neureka/AbstractNda.java @@ -167,7 +167,7 @@ private void _setDataAndCountUsage( Data newData ) { if ( newData instanceof DeviceData ) ( (DeviceData) newData ).incrementUsageCount(); - _data = newData; // This must be the only place where the data is set!!! + _data = ( newData != null ? newData : (Data) Data.none()); // This must be the only place where the data is set!!! } protected void _initDataArrayFrom( Filler filler ) @@ -254,17 +254,13 @@ protected final void _allocateVirtual() { * * @return An {@link TensorConstructor} exposing a simple API for configuring a new {@link Tensor} instance. 
*/ - protected static TensorConstructor constructFor(AbstractNda nda, Device targetDevice, NDConstructor ndConstructor ) + protected static TensorConstructor constructFor( Device targetDevice, NDConstructor ndConstructor ) { return - new TensorConstructor( - targetDevice, ndConstructor, - new TensorConstructor.API() { - @Override public void setConf( NDConfiguration conf ) { nda.mut().setNDConf( conf ); } - @Override public void setData( Data o ) { nda._setData( o ); } - @Override public void setIsVirtual( boolean isVirtual ) { nda._setIsVirtual( isVirtual ); } - } - ); + new TensorConstructor( + targetDevice, ndConstructor, + new TensorConstructor.Args() + ); } /** @@ -323,12 +319,7 @@ public boolean is( Class typeClass ) { protected void _setNDConf( NDConfiguration ndConfiguration ) { _guardSet( "ND-Configuration" ); - if ( _NDConf != null && ndConfiguration != null ) { - int s1 = Arrays.stream( _NDConf.shape() ).map( Math::abs ).reduce( 1, ( a, b ) -> a * b ); - int s2 = Arrays.stream( ndConfiguration.shape() ).map( Math::abs ).reduce( 1, ( a, b ) -> a * b ); - assert s1 == s2; - } - _NDConf = ndConfiguration; + _NDConf = ( ndConfiguration != null ? ndConfiguration : NDConfiguration.none() ); } } diff --git a/src/main/java/neureka/Data.java b/src/main/java/neureka/Data.java index b8a97b8ec..47c8609f2 100644 --- a/src/main/java/neureka/Data.java +++ b/src/main/java/neureka/Data.java @@ -16,6 +16,17 @@ */ public interface Data { + /** + * This is a static factory method which returns a {@link Data} object + * which does not contain any data. It is a sort of no-operation null object + * which can be used to represent the absence of data. + * A deleted tensor will typically have a {@link Data} object which does not contain any data. + * + * @return A {@link Data} object which does not contain any data. + */ + static Data none() { return NoOpData.INSTANCE; } + + static Data of( Class type, V... data ) { return CPU.get().allocate( type, data ); } static Data of( float... items ) { return CPU.get().allocate( Float.class, items ); } diff --git a/src/main/java/neureka/NoOpData.java b/src/main/java/neureka/NoOpData.java new file mode 100644 index 000000000..2101193f0 --- /dev/null +++ b/src/main/java/neureka/NoOpData.java @@ -0,0 +1,32 @@ +package neureka; + +import neureka.devices.Device; +import neureka.devices.host.CPU; +import neureka.dtype.DataType; + +final class NoOpData implements Data +{ + static final NoOpData INSTANCE = new NoOpData(); + + private NoOpData() {} + + @Override + public Device owner() { + return (Device) CPU.get(); + } + + @Override + public Object getOrNull() { + return null; + } + + @Override + public DataType dataType() { + return DataType.of(Void.class); + } + + @Override + public int usages() { + return 0; + } +} diff --git a/src/main/java/neureka/TensorConstructor.java b/src/main/java/neureka/TensorConstructor.java index 095350090..de4336064 100644 --- a/src/main/java/neureka/TensorConstructor.java +++ b/src/main/java/neureka/TensorConstructor.java @@ -23,17 +23,7 @@ */ final class TensorConstructor { - /** - * An interface defining methods for configuring a {@link Tensor} - * in the making... 
- */ - public interface API { - void setConf( NDConfiguration conf ); - void setData( Data o ); - void setIsVirtual( boolean isVirtual ); - } - - private final API _API; + private final Args _Args; private final Device _targetDevice; private final NDConstructor _ndConstructor; @@ -41,14 +31,14 @@ public interface API { * * @param targetDevice The {@link Device} to be used for the construction of the {@link Tensor} * @param ndConstructor A producer of the {@link NDConfiguration} interface implementation. - * @param API An implementation of the {@link API} interface. + * @param Args An implementation of the {@link Args} interface. */ - public TensorConstructor(Device targetDevice, NDConstructor ndConstructor, API API ) { + public TensorConstructor(Device targetDevice, NDConstructor ndConstructor, Args Args) { LogUtil.nullArgCheck( targetDevice, "targetDevice", Device.class, "Cannot construct a tensor without target device." ); LogUtil.nullArgCheck( ndConstructor, "ndConstructor", NDConstructor.class, "Cannot construct tensor without shape information." ); _targetDevice = (Device) targetDevice; _ndConstructor = ndConstructor; - _API = API; + _Args = Args; } /** @@ -57,21 +47,24 @@ public TensorConstructor(Device targetDevice, NDConstructor ndConstructor, AP * @param makeVirtual A flag determining if the tensor should be actual or virtual (not fully allocated). * @param autoAllocate Determines if the underlying data array should be allocated or not. */ - void unpopulated( + Args unpopulated( boolean makeVirtual, boolean autoAllocate, DataType type ) { NDConfiguration ndc = _ndConstructor.produceNDC( makeVirtual ); - _API.setIsVirtual( makeVirtual ); - _API.setConf( ndc ); - if ( autoAllocate ) _API.setData( _targetDevice.allocate( type, ndc ) ); + _Args.setIsVirtual( makeVirtual ); + _Args.setConf( ndc ); + if ( autoAllocate ) + _Args.setData( _targetDevice.allocate( type, ndc ) ); + return _Args; } - public void constructTrusted( Data data ) { - _API.setConf( _ndConstructor.produceNDC( false ) ); - _API.setData( data ); + public Args constructTrusted(Data data ) { + _Args.setConf( _ndConstructor.produceNDC( false ) ); + _Args.setData( data ); + return _Args; } - public void tryConstructing( + public Args tryConstructing( DataType dataType, Object data ) { @@ -92,37 +85,64 @@ public void tryConstructing( isDefinitelyScalarValue = true; } - if ( isDefinitelyScalarValue ) // This means that "data" is a single value! - if ( newPopulatedFromOne( data, dataType.getItemTypeClass() ) ) return; + if ( isDefinitelyScalarValue ) { // This means that "data" is a single value! 
+ newPopulatedFromOne( data, dataType.getItemTypeClass() ); + if ( data != null ) + return _Args; + } } NDConfiguration ndc = _ndConstructor.produceNDC( false ); - _API.setIsVirtual( false ); - _API.setConf( ndc ); - _API.setData( _targetDevice.allocateFromAll( dataType, ndc, data) ); + _Args.setIsVirtual( false ); + _Args.setConf( ndc ); + _Args.setData( _targetDevice.allocateFromAll( dataType, ndc, data) ); + return _Args; } - public boolean newPopulatedFromOne( Object singleItem, Class type ) + public Args newPopulatedFromOne(Object singleItem, Class type ) { int size = _ndConstructor.getSize(); NDConfiguration ndc = _ndConstructor.produceNDC(_ndConstructor.getSize() > 1); DataType dataType = (DataType) DataType.of( type ); Data array = _targetDevice.allocateFromOne( dataType, ndc, singleItem ); - _API.setIsVirtual( size > 1 ); - _API.setConf( ndc ); - _API.setData( array ); - return singleItem != null; + _Args.setIsVirtual( size > 1 ); + _Args.setConf( ndc ); + _Args.setData( array ); + return _Args; } - public void newSeeded( Class valueType, Arg.Seed seed ) + public Args newSeeded(Class valueType, Arg.Seed seed ) { NDConfiguration ndc = _ndConstructor.produceNDC( false ); Data data = _targetDevice.allocate( DataType.of( valueType ), ndc ); Object out = CPURandomization.fillRandomly( data.getOrNull(), seed ); assert out == data.getOrNull(); - _API.setIsVirtual( false ); - _API.setConf( ndc ); - _API.setData( data ); + _Args.setIsVirtual( false ); + _Args.setConf( ndc ); + _Args.setData( data ); + return _Args; + } + + /** + * An interface defining methods for configuring a {@link Tensor} + * in the making... + */ + static class Args { + private NDConfiguration _conf; + private Data _data; + private Boolean _isVirtual; + + public void setConf( NDConfiguration conf ) { _conf = conf; } + + public void setData( Data o ) { _data = o; } + + public void setIsVirtual( boolean isVirtual ) { _isVirtual = isVirtual; } + + public NDConfiguration getConf() { return _conf; } + + public Data getData() { return _data; } + + public Boolean isVirtual() { return _isVirtual; } } } diff --git a/src/main/java/neureka/TensorImpl.java b/src/main/java/neureka/TensorImpl.java index ee0ea6406..1947c55da 100644 --- a/src/main/java/neureka/TensorImpl.java +++ b/src/main/java/neureka/TensorImpl.java @@ -114,9 +114,8 @@ static Tensor _of( Object... args ) { if ( args == null || args.length == 0 ) return new TensorImpl<>(); if ( args.length == 1 ) { - TensorImpl t = new TensorImpl<>(); - boolean success = constructFor(t, CPU.get(), NDConstructor.of(1)).newPopulatedFromOne( args[ 0 ], args[ 0 ].getClass() ); - if ( !success ) { + TensorImpl t = new TensorImpl<>(constructFor(CPU.get(), NDConstructor.of(1)).newPopulatedFromOne( args[ 0 ], args[ 0 ].getClass() )); + if ( args[ 0 ] == null ) { String message = "Cannot create tensor from argument of type '" + args[ 0 ].getClass().getName() + "'!"; _LOG.error( message ); throw new IllegalArgumentException( message ); @@ -126,13 +125,11 @@ static Tensor _of( Object... 
args ) Class commonType = _extractCommonType(args); if ( commonType != null ) { - TensorImpl t = new TensorImpl<>(); - constructFor(t, CPU.get(), NDConstructor.of( args.length )) - .tryConstructing( - DataType.of(commonType), - args - ); - return t; + return new TensorImpl<>(constructFor(CPU.get(), NDConstructor.of( args.length )) + .tryConstructing( + DataType.of(commonType), + args + )); } /* EXPRESSION BASED CONSTRUCTION: @@ -177,15 +174,12 @@ static Tensor _of( Iterable iterable ) static Tensor _of( List list ) { - TensorImpl t = new TensorImpl<>(); - Class commonType = _extractCommonType( list.toArray() ); - // We construct the tensor: - constructFor(t, CPU.get(), NDConstructor.of( list.size() )) - .tryConstructing( - DataType.of(commonType), - list.toArray() - ); - return t; + return new TensorImpl<>( + constructFor(CPU.get(), NDConstructor.of( list.size() )) + .tryConstructing( + DataType.of(_extractCommonType( list.toArray() )), + list.toArray() + )); } @@ -227,6 +221,18 @@ private static Class _extractCommonType( Object... args ) { }); } + TensorImpl( TensorConstructor.Args args ) { + NDConfiguration ndc = args.getConf(); + Boolean isVirtual = args.isVirtual(); + Data data = (Data) args.getData(); + if ( isVirtual != null ) + _setIsVirtual( isVirtual ); + if ( ndc != null ) + _setNDConf( ndc ); + if ( data != null ) + _setData( data ); + } + public static TensorImpl _of( NDConstructor ndConstructor, Device device, DataType dataType, Object value ) { Object data = value; if ( List.class.isAssignableFrom( dataType.getItemTypeClass() ) ) @@ -241,9 +247,7 @@ public static TensorImpl _of( NDConstructor ndConstructor, Device device, List range = (List) data; data = range.toArray();// TODO: This is probably wrong! } - TensorImpl t = new TensorImpl<>(); - constructFor(t, device, ndConstructor).tryConstructing( dataType, data ); - return t; + return new TensorImpl<>(constructFor(device, ndConstructor).tryConstructing( dataType, data )); } static TensorImpl _of( NDConstructor ndConstructor, DataType dataType, Data data ) { @@ -253,9 +257,7 @@ static TensorImpl _of( NDConstructor ndConstructor, DataType dataType, "The data type of the data is not compatible with the data type of the tensor!" 
); - TensorImpl t = new TensorImpl<>(); - constructFor(t, data.owner(), ndConstructor).constructTrusted( data ); - return t; + return new TensorImpl<>(constructFor(data.owner(), ndConstructor).constructTrusted( data )); } /** @@ -265,8 +267,7 @@ static TensorImpl _of( NDConstructor ndConstructor, DataType type, Fil LogUtil.nullArgCheck(ndConstructor, "ndcProducer", NDConstructor.class ); LogUtil.nullArgCheck( type, "type", DataType.class ); LogUtil.nullArgCheck( type, "filler", Filler.class ); - TensorImpl t = new TensorImpl<>(); - constructFor(t, CPU.get(), ndConstructor).unpopulated( false, true, type ); + TensorImpl t = new TensorImpl<>(constructFor(CPU.get(), ndConstructor).unpopulated( false, true, type )); t._initDataArrayFrom( filler ); return t; } @@ -278,17 +279,13 @@ static TensorImpl _of( Class valueType, NDConstructor ndConstructor, A LogUtil.nullArgCheck( valueType, "valueType", Class.class ); LogUtil.nullArgCheck(ndConstructor, "ndcProducer", NDConstructor.class ); LogUtil.nullArgCheck( seed, "seed", Arg.Seed.class ); - TensorImpl t = new TensorImpl<>(); - constructFor(t, CPU.get(), ndConstructor).newSeeded( valueType, seed ); - return t; + return new TensorImpl<>(constructFor(CPU.get(), ndConstructor).newSeeded( valueType, seed )); } static TensorImpl _of( NDConstructor ndConstructor, DataType type ) { LogUtil.nullArgCheck(ndConstructor, "ndcProducer", NDConstructor.class ); LogUtil.nullArgCheck( type, "type", DataType.class ); - TensorImpl t = new TensorImpl<>(); - constructFor(t, CPU.get(), ndConstructor).unpopulated( true, true, type ); - return t; + return new TensorImpl<>(constructFor(CPU.get(), ndConstructor).unpopulated( true, true, type )); } /*================================================================================================================== @@ -366,7 +363,9 @@ public Tensor setIsVirtual(boolean isVirtual ) _actualize(); // Virtual and actual tensors require a different mapping from a given index to the underlying data.. // Therefore, we need to re-initialize the NDConfiguration object: - constructFor(this, getDevice(),NDConstructor.of(getNDConf().shape())).unpopulated( isVirtual, false, getDataType() ); + TensorConstructor.Args args = constructFor(getDevice(),NDConstructor.of(getNDConf().shape())).unpopulated( isVirtual, false, getDataType() ); + _setState( args ); + if ( isVirtual ) this.find( Relation.class ) .ifPresent( r -> @@ -385,6 +384,18 @@ public Tensor setIsVirtual(boolean isVirtual ) return this; } + private void _setState(TensorConstructor.Args args) { + Boolean isVirtual = args.isVirtual(); + NDConfiguration ndc = args.getConf(); + Data data = (Data) args.getData(); + if ( isVirtual != null ) + _setIsVirtual( isVirtual ); + if ( ndc != null ) + _setNDConf( ndc ); + if ( data != null ) + _setData( data ); + } + /** * This method is the inner counterpart to the public "{@link MutateTensor#setIsVirtual}" method. * It actually performs the bit flipping by applying the corresponding bit mask.
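The `TensorConstructor` changes above replace the old callback-style `API` interface with a passive `Args` value object: the construction methods now return the collected state, and `TensorImpl` applies it in one place (its new `Args` constructor and the private `_setState` helper). A simplified, self-contained sketch of that data flow — every name below is a stand-in, not a real library type:

```java
// Heavily simplified stand-ins that mirror only the shape of the refactor.
final class ArgsFlowSketch {

    static final class Args {                 // mirrors TensorConstructor.Args
        String conf;                          // stands in for NDConfiguration
        double[] data;                        // stands in for Data<V>
        Boolean isVirtual;                    // boxed, so "never set" stays representable as null
    }

    static final class TensorSketch {         // mirrors TensorImpl( TensorConstructor.Args )
        TensorSketch( Args args ) {
            if ( args.isVirtual != null ) { /* ...apply virtual flag...     */ }
            if ( args.conf      != null ) { /* ...apply ND configuration... */ }
            if ( args.data      != null ) { /* ...apply data array...       */ }
        }
    }

    static Args construct() {                 // mirrors e.g. tryConstructing(..)
        Args args = new Args();
        args.isVirtual = false;
        args.conf = "shape=[2,3]";
        args.data = new double[]{ 1, 2, 3, 4, 5, 6 };
        return args;                          // state flows out of the builder...
    }

    public static void main( String[] a ) {
        new TensorSketch( construct() );      // ...and into a fully initialized instance.
    }
}
```

This closes the window in which a half-built tensor was mutated through callbacks: the instance only exists once all of its state has been gathered.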
diff --git a/src/main/java/neureka/backend/api/Operation.java b/src/main/java/neureka/backend/api/Operation.java index ac852c722..7e8917bcb 100644 --- a/src/main/java/neureka/backend/api/Operation.java +++ b/src/main/java/neureka/backend/api/Operation.java @@ -43,9 +43,11 @@ of this software and associated documentation files (the "Software"), to deal import neureka.autograd.GraphNode; import neureka.backend.api.fun.Execution; import neureka.backend.api.template.operations.OperationBuilder; +import neureka.devices.Device; import neureka.math.Function; import neureka.math.implementations.FunctionConstant; -import neureka.devices.Device; + +import java.util.List; /** * This interface is part of the backend API, and it embodies the top layer of the 3 tier backend architecture. @@ -202,7 +204,9 @@ default Result execute( Function caller, ExecutionCall call ) if ( d >= 0 && !caller.dependsOn(d) ) throw new IllegalArgumentException("Cannot derive w.r.t. to input index " + d + " in function '" + caller + "', because there is no input with index "+d+"!"); - if ( caller.getSubFunctions().stream().allMatch( f -> f instanceof FunctionConstant) ) { + List subFunctions = caller.getSubFunctions(); + + if ( subFunctions.stream().allMatch( f -> f instanceof FunctionConstant) ) { if ( d < 0 ) return Result.of(Tensor.like((Tensor)call.input(0)).all(caller.call(new double[0])).mut().setIsIntermediate(true)); else return Result.of(Tensor.like((Tensor)call.input(0)).all(0).mut().setIsIntermediate(true)); } diff --git a/src/main/java/neureka/backend/main/algorithms/BiElementwise.java b/src/main/java/neureka/backend/main/algorithms/BiElementwise.java index 847fab7a2..e6a941a3c 100644 --- a/src/main/java/neureka/backend/main/algorithms/BiElementwise.java +++ b/src/main/java/neureka/backend/main/algorithms/BiElementwise.java @@ -2,6 +2,7 @@ import neureka.Tensor; import neureka.backend.api.AutoDiffMode; +import neureka.backend.api.ExecutionCall; import neureka.backend.api.Result; import neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm; import neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm; @@ -28,28 +29,35 @@ public BiElementwise() { outerCaller, outerCall, innerCall -> AbstractDeviceAlgorithm.executeDeviceAlgorithm( innerCall ) )) + //(outerCaller, outerCall) -> { + // ExecutionCall> finalOuterCall = _prepare(outerCall); + // return Result.of(executeOnCommonDevice(finalOuterCall, ()->{ + // return AbstractDeviceAlgorithm.executeDeviceAlgorithm(finalOuterCall); + // })); + //} ); - setCallPreparation( - call -> { - if ( call.arity() < 3 ) call = call.withAddedInputAt(0, null); - Device device = (Device) call.getDevice(); - if ( call.input( 0 ) == null ) // Creating a new tensor: - { - int[] outShape = call.input( 1 ).getNDConf().shape(); + setCallPreparation(this::_prepare); + } + + private ExecutionCall _prepare( final ExecutionCall inputCall ) { + ExecutionCall call = inputCall; + if ( call.arity() < 3 ) call = call.withAddedInputAt(0, null); + Device device = (Device) call.getDevice(); + if ( call.input( 0 ) == null ) // Creating a new tensor: + { + int[] outShape = call.input( 1 ).getNDConf().shape(); - Class type = (Class) call.input( 1 ).getItemType(); - Tensor output = Tensor.of( type ).withShape( outShape ).all( 0.0 ).mut().setIsIntermediate( true ); - output.mut().setIsVirtual( false ); - try { - device.store( output ); - } catch( Exception e ) { - e.printStackTrace(); - } - call = call.withInputAt( 0, output ); - } - return call; + Class type = (Class) call.input( 1 
).getItemType(); + Tensor output = Tensor.of( type ).withShape( outShape ).all( 0.0 ).mut().setIsIntermediate( true ); + output.mut().setIsVirtual( false ); + try { + device.store( output ); + } catch( Exception e ) { + e.printStackTrace(); } - ); + call = call.withInputAt( 0, output ); + } + return call; } } \ No newline at end of file diff --git a/src/main/java/neureka/backend/main/algorithms/MatMulAlgorithm.java b/src/main/java/neureka/backend/main/algorithms/MatMulAlgorithm.java index 7ee446025..20f3b271f 100644 --- a/src/main/java/neureka/backend/main/algorithms/MatMulAlgorithm.java +++ b/src/main/java/neureka/backend/main/algorithms/MatMulAlgorithm.java @@ -33,10 +33,7 @@ public MatMulAlgorithm() { setAutogradModeFor( call -> AutoDiffMode.BACKWARD_ONLY ); setExecution( (outerCaller, outerCall) -> - Result.of(AbstractDeviceAlgorithm.executeFor( - outerCaller, outerCall, - innerCall -> AbstractDeviceAlgorithm.executeDeviceAlgorithm( innerCall ) - )) + Result.of(AbstractDeviceAlgorithm.executeDeviceAlgorithm(_prepare(outerCall))) .withAutoDiff( (Function f, ExecutionCall> adCall ) -> { if ( adCall.autogradMode().allowsForward() ) diff --git a/src/main/java/neureka/backend/main/implementations/fun/api/CPUBiFun.java b/src/main/java/neureka/backend/main/implementations/fun/api/CPUBiFun.java index e283322cb..46f954fd4 100644 --- a/src/main/java/neureka/backend/main/implementations/fun/api/CPUBiFun.java +++ b/src/main/java/neureka/backend/main/implementations/fun/api/CPUBiFun.java @@ -4,19 +4,35 @@ public strictfp interface CPUBiFun { double invoke(double a, double b); - default float invoke(float a, float b) { return (float) invoke( (double) a, (double) b ); } + default float invoke(float a, float b) { + return (float) invoke( a, (double) b ); + } - default int invoke(int a, int b) { return (int) Math.round( invoke( (double) a, (double) b ) ); } + default int invoke(int a, int b) { + return (int) Math.round( invoke( a, (double) b ) ); + } - default long invoke(long a, long b) { return Math.round( invoke( (double) a, (double) b ) ); } + default long invoke(long a, long b) { + return Math.round( invoke( (double) a, (double) b ) ); + } - default byte invoke(byte a, byte b) { return (byte) Math.round( invoke( (double) a, (double) b ) ); } + default byte invoke(byte a, byte b) { + return (byte) Math.round( invoke( a, (double) b ) ); + } - default short invoke(short a, short b) { return (short) Math.round( invoke( (double) a, (double) b ) ); } + default short invoke(short a, short b) { + return (short) Math.round( invoke( a, (double) b ) ); + } - default boolean invoke(boolean a, boolean b) { return Math.round( invoke( a ? 1 : 0, b ? 1 : 0 ) ) != 0; } // Some default behaviors, it might make sense to override this for some activations. + default boolean invoke(boolean a, boolean b) { + return invoke( a ? 1 : 0, b ? 1 : 0 ) != 0; // Some default behaviors, it might make sense to override this for some activations. + } - default char invoke(char a, char b) { return (char) Math.round( invoke( (int) a, (int) b ) ); } // Some default behaviors, it might make sense to override this for some activations. + default char invoke(char a, char b) { + return (char) invoke( a, (int) b ); // Some default behaviors, it might make sense to override this for some activations. 
+ } - default Object invoke(Object a, Object b) { throw new IllegalStateException("Not implemented for operation "+getClass().getSimpleName()); } + default Object invoke(Object a, Object b) { + throw new IllegalStateException("Not implemented for operation "+getClass().getSimpleName()); + } } diff --git a/src/main/java/neureka/backend/main/implementations/fun/api/CPUFun.java b/src/main/java/neureka/backend/main/implementations/fun/api/CPUFun.java index 436d20bb6..75cd9482c 100644 --- a/src/main/java/neureka/backend/main/implementations/fun/api/CPUFun.java +++ b/src/main/java/neureka/backend/main/implementations/fun/api/CPUFun.java @@ -4,20 +4,36 @@ public strictfp interface CPUFun { double invoke(double x); - default float invoke(float x) { return (float) invoke( (double) x ); } + default float invoke(float x) { + return (float) invoke( (double) x ); + } - default int invoke(int x) { return (int) Math.round( invoke( (double) x ) ); } + default int invoke(int x) { + return (int) Math.round( invoke( (double) x ) ); + } - default long invoke(long x) { return Math.round( invoke( (double) x ) ); } + default long invoke(long x) { + return Math.round( invoke( (double) x ) ); + } - default byte invoke(byte x) { return (byte) Math.round( invoke( (double) x ) ); } + default byte invoke(byte x) { + return (byte) Math.round( invoke( (double) x ) ); + } - default short invoke(short x) { return (short) Math.round( invoke( (double) x ) ); } + default short invoke(short x) { + return (short) Math.round( invoke( (double) x ) ); + } - default boolean invoke(boolean x) { return Math.round( invoke( x ? 1 : 0 ) ) != 0; } // Some default behaviors, it might make sense to override this for some activations. + default boolean invoke(boolean x) { + return invoke( x ? 1 : 0 ) != 0; // Some default behaviors, it might make sense to override this for some activations. + } - default char invoke(char x) { return (char) Math.round( invoke( (int) x ) ); } // Some default behaviors, it might make sense to override this for some activations. + default char invoke(char x) { + return (char) invoke( (int) x ); // Some default behaviors, it might make sense to override this for some activations. 
+ } - default Object invoke(Object x) { throw new IllegalStateException("Not implemented for operation "+getClass().getSimpleName()); } + default Object invoke(Object x) { + throw new IllegalStateException("Not implemented for operation "+getClass().getSimpleName()); + } } diff --git a/src/main/java/neureka/backend/main/operations/linear/MatMul.java b/src/main/java/neureka/backend/main/operations/linear/MatMul.java index 5088e9e6a..02075fcc5 100644 --- a/src/main/java/neureka/backend/main/operations/linear/MatMul.java +++ b/src/main/java/neureka/backend/main/operations/linear/MatMul.java @@ -1,9 +1,15 @@ package neureka.backend.main.operations.linear; +import neureka.Neureka; +import neureka.backend.api.ExecutionCall; +import neureka.backend.api.Result; +import neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm; import neureka.backend.api.template.operations.AbstractOperation; import neureka.backend.api.template.operations.OperationBuilder; import neureka.backend.main.algorithms.MatMulAlgorithm; import neureka.math.Function; +import neureka.math.args.Arg; +import neureka.math.parsing.FunctionParser; public class MatMul extends AbstractOperation { @@ -26,5 +32,36 @@ public MatMul() } @Override - public double calculate( double[] inputs, int j, int d, Function[] src ) { return src[ 0 ].call( inputs, j ); } + public Result execute( final Function caller, final ExecutionCall call ) + { + if ( !caller.isFlat() ) { + Function reducedCaller = reducePairwise(caller); + ExecutionCall flatCall = AbstractDeviceAlgorithm.flatten( reducedCaller, call.withArgs(Arg.DerivIdx.of(-1)) ); + Function flat = new FunctionParser(Neureka.get().backend()).parse( flatCall.getOperation(), flatCall.arity(), true ); + return super.execute( flat, flatCall ); + } + return super.execute( reducePairwise(caller), call ); + } + + private Function reducePairwise( final Function fun ) { + Function reduced = fun; + if ( reduced.getSubFunctions().size() > 2 ) { + /* + So currently we have something like this: a@b@c@d... + However, this is how it is really executed: ((((a@b)@c)@d)..) 
+ ...so let's create a function that is nested like the above: + */ + Function nested = reduced.getSubFunctions().get(0); + for ( int i = 1; i < reduced.getSubFunctions().size(); i++ ) + nested = Function.of( nested + " @ " + reduced.getSubFunctions().get(i), true ); + + reduced = nested; + } + return reduced; + } + + @Override + public double calculate( double[] inputs, int j, int d, Function[] src ) { + return src[ 0 ].call( inputs, j ); + } } diff --git a/src/main/java/neureka/backend/main/operations/operator/Multiplication.java b/src/main/java/neureka/backend/main/operations/operator/Multiplication.java index 59ebf4982..237f8361f 100644 --- a/src/main/java/neureka/backend/main/operations/operator/Multiplication.java +++ b/src/main/java/neureka/backend/main/operations/operator/Multiplication.java @@ -3,6 +3,7 @@ import neureka.Neureka; import neureka.Tensor; import neureka.autograd.ADAction; +import neureka.autograd.GraphNode; import neureka.backend.api.AutoDiffMode; import neureka.backend.api.Call; import neureka.backend.api.ExecutionCall; @@ -80,15 +81,17 @@ public Multiplication() @Override public Result execute( final Function caller, final ExecutionCall call ) { + int d = call.getDerivativeIndex(); if ( !caller.isFlat() ) { - int d = call.getDerivativeIndex(); if ( d < 0 ) { Function reducedCaller = reducePairwise(caller); ExecutionCall flatCall = AbstractDeviceAlgorithm.flatten( reducedCaller, call.withArgs(Arg.DerivIdx.of(-1)) ); + for ( Tensor input : flatCall.inputs() ) + input.mut().setIsIntermediate( false ); Function flat = new FunctionParser(Neureka.get().backend()).parse( flatCall.getOperation(), flatCall.arity(), true ); Result r = super.execute( flat, flatCall ); - //for ( int i = 0; i < flatCall.inputs().length; i++ ) - // _deleteIfNotIn(call.inputs(), flatCall.input(i)); // TODO: Make it possible to delete more stuff + for ( int i = 0; i < flatCall.inputs().length; i++ ) + _deleteIfNotIn(call.inputs(), flatCall.input(i)); return r; } else { if ( !call.validate().all( (a, b) -> Util.canBeBroadcast(a.shape(), b.shape()) ).isValid() ) @@ -109,7 +112,28 @@ public Result execute( final Function caller, final ExecutionCall call ) } ); } } - return super.execute( reducePairwise(caller), call ); + + + Function reduced = reducePairwise(caller); + //ExecutionCall flatCall = call; + //Function flat = caller; + //if ( d < 0 && caller.isFlat() && subFunctions.stream().anyMatch( f -> f instanceof FunctionConstant) ) { + // Function noAd = Function.of( caller.toString(), false ); + // ExecutionCall flatCall = AbstractDeviceAlgorithm.flatten( noAd, call.withArgs(Arg.DerivIdx.of(-1)) ); + // return super.execute( reducePairwise(caller), call ); + //} + if ( reduced.equals(caller) && reduced.isFlat() ) + return super.execute( reduced, call ); + else + return this.execute( reduced, call ); + } + + private void _deleteIfNotIn( Tensor[] inputs, Tensor input ) { + for ( Tensor i : inputs ) { + if ( i == input ) return; + } + if ( input.getGraphNode().map(GraphNode::canBeDeleted).orElse(true) ) + input.mut().delete(); } public static Result derive( diff --git a/src/main/java/neureka/devices/CustomDeviceCleaner.java b/src/main/java/neureka/devices/CustomDeviceCleaner.java index 6c990316d..13f5febec 100644 --- a/src/main/java/neureka/devices/CustomDeviceCleaner.java +++ b/src/main/java/neureka/devices/CustomDeviceCleaner.java @@ -1,5 +1,8 @@ package neureka.devices; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.lang.ref.PhantomReference; import 
java.lang.ref.ReferenceQueue; import java.util.ArrayList; @@ -15,61 +18,112 @@ * anywhere but within this library.
* This class or its public methods might change or get removed in future versions! */ -final class CustomDeviceCleaner implements DeviceCleaner, Runnable +final class CustomDeviceCleaner implements DeviceCleaner { + private static final Logger log = LoggerFactory.getLogger(CustomDeviceCleaner.class); + private static final CustomDeviceCleaner _INSTANCE = new CustomDeviceCleaner(); + private static final long _QUEUE_TIMEOUT = 60 * 1000; + private final ReferenceQueue _referenceQueue = new ReferenceQueue<>(); - private final long _timeout = 60 * 1000; - private int _registered = 0; + private final List> _toBeCleaned = new ArrayList<>(); + private final Thread _thread; + + + public static CustomDeviceCleaner getInstance() { + return _INSTANCE; + } + + CustomDeviceCleaner() { + _thread = new Thread(this::run, "Neureka-Cleaner"); + } - List list = new ArrayList<>(); static class ReferenceWithCleanup extends PhantomReference { - private final Runnable _action; + private Runnable _action; - ReferenceWithCleanup(T o, Runnable action, ReferenceQueue queue) { + ReferenceWithCleanup( T o, Runnable action, ReferenceQueue queue ) { super( o, queue ); _action = action; } public void cleanup() { - _action.run(); + if ( _action != null ) { + try { + _action.run(); + } catch (Exception e) { + log.error("Failed to execute cleanup action '"+_action+"'.", e); + } finally { + _action = null; + } + } } } - @Override - public void register(Object o, Runnable action) { + public void register( Object o, Runnable action ) { + if ( o == null ) { + log.warn("Attempt to register a null object for cleanup. This is not allowed!"); + try { + action.run(); + } catch (Exception e) { + log.error("Failed to execute cleanup action '"+action+"'.", e); + } + return; + } synchronized ( _referenceQueue ) { - list.add(new ReferenceWithCleanup(o, action, _referenceQueue)); - _registered++; - if ( _registered == 1 ) new Thread( this::run ).start(); + _toBeCleaned.add(new ReferenceWithCleanup<>(o, action, _referenceQueue)); + if ( _toBeCleaned.size() == 1 ) { + if ( !_thread.isAlive() ) { + _thread.start(); + } + else { + // We notify the cleaner thread that there are new items to be cleaned + synchronized ( _thread ) { + _thread.notify(); + } + } + } } } - @Override - public void run() { - while ( _registered > 0 ) { + private void run() { + if ( !_thread.isAlive() ) { + _thread.start(); + } + while ( _thread.isAlive() ) { + while ( !_toBeCleaned.isEmpty() ) { + checkCleanup(); + } try { - ReferenceWithCleanup ref = (ReferenceWithCleanup) _referenceQueue.remove(_timeout); - if ( ref != null ) { - try { - ref.cleanup(); - } catch ( Throwable e ) { - e.printStackTrace(); - // ignore exceptions from the cleanup action - // (including interruption of cleanup thread) - } - _registered--; + synchronized ( _thread ) { + _thread.wait(); + } + } catch (Exception e) { + log.error("Failed to make cleaner thread wait for cleaning notification!", e); + } + } + } + + private void checkCleanup() { + try { + ReferenceWithCleanup ref = (ReferenceWithCleanup) _referenceQueue.remove(_QUEUE_TIMEOUT); + if ( ref != null ) { + try { + ref.cleanup(); + } catch ( Throwable e ) { + log.error("Failed to perform cleanup!", e); + } finally { + _toBeCleaned.remove(ref); } - } catch ( Throwable e ) { - e.printStackTrace(); // The queue failed } + } catch ( Throwable e ) { + log.error("Failed to call 'remove()' on cleaner internal queue.", e); } } @Override public String toString() { return 
this.getClass().getSimpleName()+"@"+Integer.toHexString(this.hashCode())+"[" + - "registered=" + _registered + + "registered=" + _toBeCleaned.size() + "]"; } diff --git a/src/main/java/neureka/devices/opencl/OpenCLDevice.java b/src/main/java/neureka/devices/opencl/OpenCLDevice.java index dd4d22215..a998fe31b 100644 --- a/src/main/java/neureka/devices/opencl/OpenCLDevice.java +++ b/src/main/java/neureka/devices/opencl/OpenCLDevice.java @@ -1011,6 +1011,10 @@ public boolean equals(Object obj) { if ( !(obj instanceof cl_tsr) ) return false; return ((cl_tsr) obj).value == this.value; } + + @Override public int hashCode() { + return value.hashCode(); + } } /** diff --git a/src/main/java/neureka/ndim/config/NDConfiguration.java b/src/main/java/neureka/ndim/config/NDConfiguration.java index 10c5252ae..0ccddbcda 100644 --- a/src/main/java/neureka/ndim/config/NDConfiguration.java +++ b/src/main/java/neureka/ndim/config/NDConfiguration.java @@ -45,6 +45,11 @@ of this software and associated documentation files (the "Software"), to deal */ public interface NDConfiguration { + /** + * @return A {@link NDConfiguration} instance which represents the absence of a configuration. + */ + static NDConfiguration none() { return NoOpNDConfig.INSTANCE; } + static NDConfiguration of( int[] shape, // The shape of the tensor. int[] strides, // Strides are the distances between elements of a tensor in each dimension. diff --git a/src/main/java/neureka/ndim/config/NoOpNDConfig.java b/src/main/java/neureka/ndim/config/NoOpNDConfig.java new file mode 100644 index 000000000..de3b20d27 --- /dev/null +++ b/src/main/java/neureka/ndim/config/NoOpNDConfig.java @@ -0,0 +1,89 @@ +package neureka.ndim.config; + +final class NoOpNDConfig implements NDConfiguration +{ + + static final NoOpNDConfig INSTANCE = new NoOpNDConfig(); + + private NoOpNDConfig() {} + + @Override + public int rank() { + return 0; + } + + @Override + public int[] shape() { + return new int[0]; + } + + @Override + public int shape(int i) { + return 0; + } + + @Override + public int[] indicesMap() { + return new int[0]; + } + + @Override + public int indicesMap(int i) { + return 0; + } + + @Override + public int[] strides() { + return new int[0]; + } + + @Override + public int strides(int i) { + return 0; + } + + @Override + public int[] spread() { + return new int[0]; + } + + @Override + public int spread(int i) { + return 0; + } + + @Override + public int[] offset() { + return new int[0]; + } + + @Override + public int offset(int i) { + return 0; + } + + @Override + public int indexOfIndex(int index) { + return 0; + } + + @Override + public int[] indicesOfIndex(int index) { + return new int[0]; + } + + @Override + public int indexOfIndices(int[] indices) { + return 0; + } + + @Override + public boolean equals(NDConfiguration ndc) { + return false; + } + + @Override + public NDConfiguration newReshaped(int[] newForm) { + return this; + } +} diff --git a/src/test/groovy/it/Eleven_Lines_NN_System_Spec.groovy b/src/test/groovy/it/Eleven_Lines_NN_System_Spec.groovy index 7657b5815..cce965702 100644 --- a/src/test/groovy/it/Eleven_Lines_NN_System_Spec.groovy +++ b/src/test/groovy/it/Eleven_Lines_NN_System_Spec.groovy @@ -58,8 +58,8 @@ class Eleven_Lines_NN_System_Spec extends Specification { } expect : - W1.mut.data.get().collect({it.round 14}) == RESULT_W1.collect({it.round 14}) - W2.mut.data.get().collect({it.round 14}) == RESULT_W2.collect({it.round 14}) + W1.mut.data.get().collect({it.round 12}) == RESULT_W1.collect({it.round 12}) + 
W2.mut.data.get().collect({it.round 12}) == RESULT_W2.collect({it.round 12}) } def 'One can write a simple neural network in less than 11 lines of code!'() @@ -76,8 +76,8 @@ class Eleven_Lines_NN_System_Spec extends Specification { } expect : - W1.mut.data.get().collect({it.round 14}) == RESULT_W1.collect({it.round 14}) - W2.mut.data.get().collect({it.round 14}) == RESULT_W2.collect({it.round 14}) + W1.mut.data.get().collect({it.round 12}) == RESULT_W1.collect({it.round 12}) + W2.mut.data.get().collect({it.round 12}) == RESULT_W2.collect({it.round 12}) } diff --git a/src/test/groovy/st/Broad_System_Test.groovy b/src/test/groovy/st/Broad_System_Test.groovy index 3c1700e79..6161d7e1e 100644 --- a/src/test/groovy/st/Broad_System_Test.groovy +++ b/src/test/groovy/st/Broad_System_Test.groovy @@ -1,9 +1,11 @@ package st import neureka.Neureka +import neureka.Shape +import neureka.Tensor import neureka.view.NDPrintSettings -import st.tests.BroadSystemTest import spock.lang.Specification +import st.tests.BroadSystemTest class Broad_System_Test extends Specification { @@ -32,4 +34,87 @@ class Broad_System_Test extends Specification BroadSystemTest.on() // This is the actual test. } + def 'A function with expression "softplus((I[0]xI[1])*-100)" can be backpropagated.'() + { + given : + Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true); + Tensor tensor1 = Tensor.of(Shape.of(1, 3), 2d); + Tensor tensor2 = Tensor.of(Double).withShape(2, 1).all(-1.0); + tensor1.setRqsGradient(true); + tensor2.setRqsGradient(true); + + when : + Tensor result1 = Tensor.of("softplus((I[0]xI[1])*-100)", [tensor1, tensor2]); + Tensor result2 = (Tensor.of("i0 x i1", tensor1, tensor2)*-100).softplus(); + then : + result1.toString() == "[2x3]:(200.0, 200.0, 200.0, 200.0, 200.0, 200.0); ->d[2x3]:(-100.0, -100.0, -100.0, -100.0, -100.0, -100.0)" + result2.toString() == "[2x3]:(200.0, 200.0, 200.0, 200.0, 200.0, 200.0); ->d[2x3]:(-100.0, -100.0, -100.0, -100.0, -100.0, -100.0)" + + when : 'We perform a backwards pass of a gradient of `-0.1`:' + result1.backward( -0.1 ); + then : + tensor1.gradient.get().toString() == "[1x3]:(-20.0, -20.0, -20.0)" + tensor2.gradient.get().toString() == "[2x1]:(60.0, 60.0)" + + when : 'We perform a backwards pass of a gradient of `-0.1`:' + result2.backward( -0.1 ); + then : + tensor1.gradient.get().toString() == "[1x3]:(-40.0, -40.0, -40.0)" + tensor2.gradient.get().toString() == "[2x1]:(120.0, 120.0)" + } + + def 'A function with expression "softplus(tanh(I[0]*I[1]*2)*I[1])" can be backpropagated.'() + { + given : + Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true); + Tensor tensor1 = Tensor.of(Shape.of(2), 2d); + Tensor tensor2 = Tensor.of(Shape.of(2), 4d); + tensor1.setRqsGradient(true); + tensor2.setRqsGradient(true); + + when : + Tensor result1 = Tensor.of("softplus(tanh(I[0]*I[1]*2)*I[1])", [tensor1, tensor2]); + Tensor result2 = ((tensor1 * tensor2 * 2).tanh()*tensor2).softplus(); + then : + result1.toString({it.hasDerivatives=false}) == "[2]:(4.01815, 4.01815)" + result2.toString({it.hasDerivatives=false}) == "[2]:(4.01815, 4.01815)" + + when : 'We perform a backwards pass of a gradient of `100`:' + result1.backward( 100 ); + then : + tensor1.gradient.get().toString() == "[2]:(159.09e-12, 159.09e-12)" + tensor2.gradient.get().toString() == "[2]:(98.2014, 98.2014)" + + when : 'We perform a backwards pass of a gradient of `100`:' + result2.backward( 100 ); + then : + tensor1.gradient.get().toString() == "[2]:(318.18e-12, 318.18e-12)" + 
tensor2.gradient.get().toString() == "[2]:(196.403, 196.403)" + } + + def 'A function with expression "(-3*(2*(i0*-1)))*(-1*i0)" can be backpropagated.'() + { + given : + Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true); + Tensor tensor1 = Tensor.of(Shape.of(1), 2d);//-2*4 = 8 | *3 = -24 + tensor1.setRqsGradient(true); + + when : + Tensor result1 = Tensor.of("(-3*(2*(i0*-1)))*(-1*i0)", [tensor1]); + Tensor result2 = ((((tensor1*-1)*2)*-3)*(tensor1*-1)); + then : + result1.toString({it.hasDerivatives=false}) == "[1]:(-24.0)" + result2.toString({it.hasDerivatives=false}) == "[1]:(-24.0)" + + when : 'We perform a backwards pass of a gradient of `2`:' + result1.backward( 2 ); + then : + tensor1.gradient.get().toString() == "[1]:(-48.0)" + + when : 'We perform a backwards pass of a gradient of `2`:' + result2.backward( 2 ); + then : + tensor1.gradient.get().toString() == "[1]:(-96.0)" + } + } diff --git a/src/test/groovy/st/tests/BroadSystemTest.java b/src/test/groovy/st/tests/BroadSystemTest.java index ce0167ef4..ade1a973a 100644 --- a/src/test/groovy/st/tests/BroadSystemTest.java +++ b/src/test/groovy/st/tests/BroadSystemTest.java @@ -118,20 +118,6 @@ public static boolean on() " =>d|[ [1x3]:(2.0, 2.0, 2.0) ]|:t{ [2x1]:(-1.0, -1.0) }" }); //--- - tensor1 = Tensor.of(Shape.of(1, 3), 2d); - tensor2 = Tensor.of(Double.class).withShape(2, 1).all(-1.0); - tensor1.setRqsGradient(true); - tensor2.setRqsGradient(true); - tester.testTensorAutoGrad( - new Tensor[]{tensor1, tensor2}, - "lig((I[0]xI[1])*-100)", - new String[]{ - "[2x3]:(200.0, 200.0, 200.0, 200.0, 200.0, 200.0);", - " =>d|[ [2x3]:(-100.0, -100.0, -100.0, -100.0, -100.0, -100.0) ]|:t{ [2x3]:(-2.0, -2.0, -2.0, -2.0, -2.0, -2.0);", - " =>d|[ [1x3]:(2.0, 2.0, 2.0) ]|:t{ [2x1]:(-1.0, -1.0) }", - " =>d|[ [2x1]:(-1.0, -1.0) ]|:t{ [1x3]:(2.0, 2.0, 2.0) }", - } - ); //---//Broken down to 2 functions: tensor1 = Tensor.of(Shape.of(1, 3), 2d); tensor2 = Tensor.of(Shape.of(2, 1), -1d); @@ -156,26 +142,6 @@ public static boolean on() new String[]{"[4x3x2]:(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)"} ); //--- - tensor1 = Tensor.of(Shape.of(2), 2d); - tensor2 = Tensor.of(Shape.of(2), 4d); - tensor1.setRqsGradient(true); - tensor2.setRqsGradient(true); - tester.testTensorAutoGrad( - new Tensor[]{tensor1, tensor2}, - "lig(tanh(I[0]*I[1]*2)*I[1])", - new String[]{ - "[2]:(4.01815, 4.01815); ", - "=>d|[ [2]:(3.92806, 3.92806) ]|" + - ":t{ [2]:(0.99999, 0.99999); ", - "=>d|[ [2]:(202.505e-15, 202.505e-15) ]|" + - ":t{ [2]:(4.0, 4.0) }", - "=>d|[ [2]:(405.009e-15, 405.009e-15) ]|" + - ":t{ [2]:(2.0, 2.0) }", - "}, ", - "=>d|[ [2]:(0.98201, 0.98201) ]|" + - ":t{ [2]:(4.0, 4.0) }" - } - ); //--- Neureka.get().settings().debug().setIsKeepingDerivativeTargetPayloads(true); tensor1 = Tensor.of(Shape.of(3, 2, 1), 4d); @@ -254,11 +220,6 @@ public static boolean on() } ); //===================== - _subTest(tester); - //===================== - _subTest2(tester); - //===================== - // TESTING INVERSE: //================= @@ -524,109 +485,4 @@ public static boolean on() //=========================================== return true; } - - private static void _subTest(UnitTester_Tensor tester) { - - Neureka.get().settings().debug().setIsDeletingIntermediateTensors(false); - - Tensor tensor1 = Tensor.of(Shape.of(1), 2d);//-2*4 = 8 | *3 = -24 - tensor1.setRqsGradient(true); - tester.testTensorAutoGrad( - new Tensor[]{tensor1},//2 =>-2 =>-4 =>12 //2 =>-2 //-2,12 =>-24 - 
"(-3*(2*(i0*-1)))*(-1*i0)", - new String[]{ - "[1]:(-24.0); ", - "=>d|[ [1]:(12.0) ]|:" + - "t{ [1]:(-2.0); " + - "=>d|[ [1]:(-1.0) ]|:" + - "t{ [1]:(2.0) } " + - "}", - "=>d|[ [1]:(-2.0) ]|:" + - "t{ [1]:(12.0); " + - "=>d|[ [1]:(6.0) ]|:" + - "t{ [1]:(2.0) } " + - "}" - } - ); - - Neureka.get().settings().debug().setIsDeletingIntermediateTensors(true); - - - tensor1 = Tensor.of(Shape.of(1), 2d);//-2*4 = 8 | *3 = -24 - tensor1.setRqsGradient(true); - tester.testTensorAutoGrad( - new Tensor[]{tensor1},//2 =>-2 =>-4 =>12 //2 =>-2 //-2,12 =>-24 - "(-3*(2*(i0*-1)))*(-1*i0)", - new String[]{ - "[1]:(-24.0); ", - "=>d|[ [1]:(12.0) ]|:" + - "t{ [1]:(-2.0); =>d|[ [1]:(-1.0) ]|:t{ [1]:(2.0) } }", - "=>d|[ [1]:(-2.0) ]|:" + - "t{ [1]:(12.0); " + - "=>d|[ [1]:(6.0) ]|:" + - "t{ [1]:(2.0) } " + - "}" - } - ); -/* - [1]:(-24.0); - =>d|[ [1]:(-2.0) ]|:t{ - [1]:(12.0); - =>d|[ [1]:(6.0) ]|:t{ [1]:(2.0) } - }, - =>d|[ [1]:(12.0) ]|:t{ - [1]:(-2.0); - =>d|[ [1]:(-1.0) ]|:t{ [1]:(2.0) } - } - */ - - } - - - private static void _subTest2(UnitTester_Tensor tester) { - - Neureka.get().settings().debug().setIsDeletingIntermediateTensors(false); - Tensor tensor1 = Tensor.of(Shape.of(1), 2d);//-2*4 = 8 | *3 = -24 - tensor1.setRqsGradient(true); - Tensor result = Tensor.of("(-3*(2*(i0*-1)))*(-1*i0)", tensor1); - GraphNode node = (GraphNode) result.get( GraphNode.class ); - String asString = node.toString(GraphNode.Print.FANCY); - tester.testContains( - asString, - new String[]{ - "[1]:(-24.0)", - "[1]:(12.0)", - "[1]:(2.0)", - "[1]:(-3.0)", - "(-1.0 * I[0])", - "(I[0] * -1.0)", - "(I[0] * I[1])", - "LEAVE RQS GRADIENT", - "(I[0] * I[1]) => [1]:(-4.0)" - }, - "Testing 'toString' of GraphNode"); - Neureka.get().settings().debug().setIsDeletingIntermediateTensors(true); - result = Tensor.of("(-3*(2*(i0*-1)))*(-1*i0)", tensor1); - node = (GraphNode) result.get( GraphNode.class ); - asString = node.toString(GraphNode.Print.FANCY); - tester.testContains( - asString, - new String[]{ - "(I[0] * I[1]) => [1]:(-24.0), type='BRANCH'", - "[1]:(12.0)", - "[1]:(2.0)", - //"[1]:(-3.0)", - "(-1.0 * I[0])", - "(I[0] * -1.0)", - "(I[0] * I[1])", - "LEAVE RQS GRADIENT", - //"deleted", - //"(I[0] * -1.0) => deleted, type='BRANCH'" - "(I[0] * -1.0) =>", - "type='LEAVE DELETED'", - "[1]:(2.0), type='LEAVE RQS GRADIENT'" - }, - "Testing 'toString' of GraphNode"); - } - } diff --git a/src/test/groovy/ut/device/Cross_Device_Type_Spec.groovy b/src/test/groovy/ut/device/Cross_Device_Type_Spec.groovy index 9ecb27c6a..f22d3a53a 100644 --- a/src/test/groovy/ut/device/Cross_Device_Type_Spec.groovy +++ b/src/test/groovy/ut/device/Cross_Device_Type_Spec.groovy @@ -112,7 +112,7 @@ class Cross_Device_Type_Spec extends Specification expect : 'Querying for a device using a device type and key works as expected.' Device.get(type, key) === expected and : 'We can use the "find" method if we want the result to be wrapped in a nice and safe Optional instance.' - Device.find(type, key).isEmpty() && expected == null || Device.find(type, key).get() === expected + !Device.find(type, key).isPresent() && expected == null || Device.find(type, key).get() === expected where : 'The we can use the following device type, key and expected device instance.' type | key || expected @@ -133,7 +133,7 @@ class Cross_Device_Type_Spec extends Specification and : 'The "any" method returns a device instance or the "CPU device", which is the library default device.' 
Device.any(key) === expected || Device.any(key) === CPU.get() and : 'The "find" method returns a device instance wrapped in an Optional instance.' - Device.find(key).isEmpty() && expected == null || Device.find(key).get() === expected + !Device.find(key).isPresent() && expected == null || Device.find(key).get() === expected where : 'The we can use the following search key and expected device instance.' key || expected diff --git a/src/test/groovy/ut/utility/Cleaner_Testing.groovy b/src/test/groovy/ut/utility/Cleaner_Testing.groovy index 53c6cbd6b..bb93b4b57 100644 --- a/src/test/groovy/ut/utility/Cleaner_Testing.groovy +++ b/src/test/groovy/ut/utility/Cleaner_Testing.groovy @@ -61,7 +61,7 @@ class Cleaner_Testing extends Specification System.gc() then : - Sleep.until(700, { refCount == 8 && cleaner._registered == 8 }) + Sleep.until(700, { refCount == 8 && cleaner._toBeCleaned.size() == 8 }) r1 == null r2 != null r3 == null @@ -79,7 +79,7 @@ class Cleaner_Testing extends Specification System.gc() then : - Sleep.until(750, { refCount == 6 && cleaner._registered == 6 }) + Sleep.until(750, { refCount == 6 && cleaner._toBeCleaned.size() == 6 }) r1 == null r2 == null r3 == null diff --git a/src/test/kotlin/Kotlin_Compatibility_Unit_Testing.kt b/src/test/kotlin/Kotlin_Compatibility_Unit_Testing.kt index c328aaa93..ee7c199f3 100644 --- a/src/test/kotlin/Kotlin_Compatibility_Unit_Testing.kt +++ b/src/test/kotlin/Kotlin_Compatibility_Unit_Testing.kt @@ -13,6 +13,7 @@ import neureka.dtype.DataType import neureka.optimization.Optimizer import org.junit.jupiter.api.BeforeEach import org.junit.jupiter.api.Test +import java.util.* internal class @@ -326,7 +327,7 @@ Kotlin_Compatibility_Unit_Testing { assert(e1.toString() == "(1x1x1):[42.0+666.0i]") // When : - slice.mut.assign(slice.map { it.toUpperCase() }) + slice.mut.assign(slice.map { it.uppercase(Locale.getDefault()) }) // Then : assert(
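Tying back to the `CustomDeviceCleaner` rewrite further above: the core mechanism there is a `PhantomReference` registered with a `ReferenceQueue`, with the reference objects themselves held strongly (the `_toBeCleaned` list) until their cleanup action has run. A standalone sketch of that pattern, independent of the library's own classes — all names here are illustrative only:

```java
import java.lang.ref.PhantomReference;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

final class CleanerSketch {

    private static final ReferenceQueue<Object> QUEUE = new ReferenceQueue<>();
    // Strong references keep the PhantomReferences themselves alive
    // until their cleanup has run (the role of _toBeCleaned above):
    private static final Set<CleanupRef> PENDING = ConcurrentHashMap.newKeySet();

    private static final class CleanupRef extends PhantomReference<Object> {
        private final Runnable action;
        CleanupRef( Object referent, Runnable action ) {
            super( referent, QUEUE );
            this.action = action;
        }
    }

    static void register( Object o, Runnable action ) {
        PENDING.add( new CleanupRef( o, action ) );
    }

    public static void main( String[] args ) throws InterruptedException {
        register( new byte[1024], () -> System.out.println("cleaned!") );
        System.gc(); // only a hint; the byte[] referent is already unreachable here
        Reference<?> ref = QUEUE.remove( 5_000 ); // blocks until enqueued, or times out
        if ( ref instanceof CleanupRef ) {
            ((CleanupRef) ref).action.run(); // run the registered cleanup action
            PENDING.remove( ref );           // only now drop the strong reference
        }
    }
}
```

Removing the entry only after `cleanup()` has run mirrors the `finally` block in the rewritten `checkCleanup()`, and explains why the patch keeps a strong list rather than relying on the queue alone.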